/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2018 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>

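/*
 * Usage sketch (illustrative only, not part of the driver): applications
 * reach this PMD through the generic rte_cryptodev burst API on a
 * configured device/queue pair, e.g.:
 *
 *	struct rte_crypto_op *ops[32];
 *	uint16_t n;
 *
 *	n = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *	...
 *	n = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_ops);
 *
 * Those calls land in dpaa_sec_enqueue_burst()/dpaa_sec_dequeue_burst()
 * below; dev_id, qp_id and nb_ops are placeholders for the application's
 * own values.
 */
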
enum rta_sec_era rta_sec_era;

int dpaa_logtype_sec;

static uint8_t cryptodev_driver_id;

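/*
 * Per-lcore staging area used by the DQRR callback (dqrr_out_fq_cb_rx)
 * to hand completed crypto ops back to the polling thread without locks,
 * hence the __thread storage class.
 */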
static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
	struct dpaa_sec_op_ctx *ctx;
	int retval;

	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, so it is called 4 times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, and memset() is costlier than dcbz_64().
	 */
	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;
	ctx->vtop_offset = (size_t) ctx
				- rte_mempool_virt2iova(ctx);

	return ctx;
}

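/*
 * Address translation helpers for buffers exchanged with the SEC block:
 * dpaa_mem_vtop() converts a virtual address to the IOVA the hardware
 * needs via the backing memseg; dpaa_mem_ptov() is the inverse, used when
 * completions come back carrying hardware (IOVA) addresses.
 */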
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
	if (ms)
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
	return (size_t)NULL;
}

static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	return rte_mem_iova2virt(paddr);
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with dest chan as the caam chan so that
 * all the packets in this queue can be dispatched into caam.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern  = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}

/* Something is enqueued on in_fq and caam puts the crypto result on out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}
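
/*
 * Note on frame format: every job handed to SEC is a QMan compound frame
 * whose FD points at a pair of scatter-gather entries, sg[0] describing
 * the output and sg[1] the input; either may be an extension entry that
 * points at a longer SG chain (see the build_*() helpers below).
 */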

/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern  = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}

static inline int is_cipher_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
}

static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
	}
}

static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
	}
}

static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
	}
}


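/*
 * The per-session command block (struct sec_cdb) embeds a SEC shared
 * descriptor built with the RTA cnstr_shdsc_*() constructors.
 * dpaa_sec_prep_cdb() picks the constructor from the session type
 * (cipher-only, auth-only, AEAD, IPsec protocol or authenc), writes the
 * descriptor words into cdb->sh_desc[], records the returned length in
 * the preheader (sh_hdr.hi.field.idlen) and byte-swaps the preheader to
 * big-endian for the hardware. A negative constructor return value means
 * descriptor construction failed.
 */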
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, &alginfo_c,
						NULL,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

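		/*
		 * Inline-key query: sh_desc[0..2] are borrowed as scratch.
		 * The two key lengths go into words 0 and 1;
		 * rta_inline_query() then reports, one bit per key in word
		 * 2, whether each key can stay immediate in the descriptor
		 * or must be referenced by pointer, in which case the key
		 * address is converted with dpaa_mem_vtop(). The scratch
		 * words are zeroed again before the real descriptor is
		 * built.
		 */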
		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1<<1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		if (is_proto_ipsec(ses)) {
			if (ses->dir == DIR_ENC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
						cdb->sh_desc,
						true, swap, &ses->encap_pdb,
						(uint8_t *)&ses->ip4_hdr,
						&alginfo_c, &alginfo_a);
			} else if (ses->dir == DIR_DEC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
						cdb->sh_desc,
						true, swap, &ses->decap_pdb,
						&alginfo_c, &alginfo_a);
			}
		} else {
			/* Auth_only_len is set as 0 here and it will be
			 * overwritten in fd for each packet.
			 */
			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
					true, swap, &alginfo_c, &alginfo_a,
					ses->iv.length, 0,
					ses->digest_length, ses->dir);
		}
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers, we provide the exact
	 * number of buffers by setting the QM_VDQCR_EXACT flag.
	 * Otherwise we leave QM_VDQCR_EXACT unset, which can return up to
	 * two more buffers than requested, so we request two fewer in
	 * that case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			op->sym->m_src->pkt_len = len;
			op->sym->m_src->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}

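/*
 * Compound-frame layout produced by build_auth_only_sg(), as a sketch:
 *
 *	sg[0]   -> digest output buffer
 *	sg[1]   -> extension entry pointing at sg[2..]
 *	sg[2..] -> one entry per mbuf segment of the data to authenticate
 *
 * In the verification (decode) case a trailing entry carries a copy of
 * the expected digest so the hardware can compare it.
 */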
597 
598 static inline struct dpaa_sec_job *
599 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
600 {
601 	struct rte_crypto_sym_op *sym = op->sym;
602 	struct rte_mbuf *mbuf = sym->m_src;
603 	struct dpaa_sec_job *cf;
604 	struct dpaa_sec_op_ctx *ctx;
605 	struct qm_sg_entry *sg, *out_sg, *in_sg;
606 	phys_addr_t start_addr;
607 	uint8_t *old_digest, extra_segs;
608 
609 	if (is_decode(ses))
610 		extra_segs = 3;
611 	else
612 		extra_segs = 2;
613 
614 	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
615 		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
616 				MAX_SG_ENTRIES);
617 		return NULL;
618 	}
619 	ctx = dpaa_sec_alloc_ctx(ses);
620 	if (!ctx)
621 		return NULL;
622 
623 	cf = &ctx->job;
624 	ctx->op = op;
625 	old_digest = ctx->digest;
626 
627 	/* output */
628 	out_sg = &cf->sg[0];
629 	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
630 	out_sg->length = ses->digest_length;
631 	cpu_to_hw_sg(out_sg);
632 
633 	/* input */
634 	in_sg = &cf->sg[1];
635 	/* need to extend the input to a compound frame */
636 	in_sg->extension = 1;
637 	in_sg->final = 1;
638 	in_sg->length = sym->auth.data.length;
639 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
640 
641 	/* 1st seg */
642 	sg = in_sg + 1;
643 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
644 	sg->length = mbuf->data_len - sym->auth.data.offset;
645 	sg->offset = sym->auth.data.offset;
646 
647 	/* Successive segs */
648 	mbuf = mbuf->next;
649 	while (mbuf) {
650 		cpu_to_hw_sg(sg);
651 		sg++;
652 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
653 		sg->length = mbuf->data_len;
654 		mbuf = mbuf->next;
655 	}
656 
657 	if (is_decode(ses)) {
658 		/* Digest verification case */
659 		cpu_to_hw_sg(sg);
660 		sg++;
661 		rte_memcpy(old_digest, sym->auth.digest.data,
662 				ses->digest_length);
663 		start_addr = dpaa_mem_vtop(old_digest);
664 		qm_sg_entry_set64(sg, start_addr);
665 		sg->length = ses->digest_length;
666 		in_sg->length += ses->digest_length;
667 	} else {
668 		/* Digest calculation case */
669 		sg->length -= ses->digest_length;
670 	}
671 	sg->final = 1;
672 	cpu_to_hw_sg(sg);
673 	cpu_to_hw_sg(in_sg);
674 
675 	return cf;
676 }
677 
/**
 * packet looks like:
 *		|<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *		|
 *	   mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		sg->extension = 1;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);

		sg = &cf->sg[2];
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		cpu_to_hw_sg(sg);

		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}

	return cf;
}

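/*
 * Compound-frame layout produced by build_cipher_only_sg(), as a sketch:
 *
 *	sg[0] -> extension entry for the output chain (one entry per
 *	         destination mbuf segment)
 *	sg[1] -> extension entry for the input chain: the IV first, then
 *	         one entry per source mbuf segment
 */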
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = sym->cipher.data.length;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->cipher.data.length + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

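/*
 * For AEAD (AES-GCM) jobs the input chain carries IV, optional AAD and
 * the data to be processed; the output chain carries the processed data
 * plus, on encrypt, the digest. ses->auth_only_len is the AAD length
 * recorded at session setup.
 */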
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->auth_only_len
						+ ses->digest_length;
	else
		out_sg->length = sym->aead.data.length + ses->auth_only_len;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset +
					ses->auth_only_len;
	sg->offset = sym->aead.data.offset - ses->auth_only_len;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
							+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

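/*
 * For IPsec protocol-offload sessions the hardware consumes and produces
 * whole packets, so build_proto() uses plain single-entry input/output
 * frames instead of SG chains; the output length is simply the room
 * available in the destination mbuf.
 */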
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}

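/*
 * Enqueue path: each op is converted into a compound FD by one of the
 * build_*() helpers above and pushed onto the session's SEC input FQ.
 * For authenc sessions the auth-only length is carried in fd->cmd, from
 * where the shared descriptor picks it up through the DPOVRD register.
 */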
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Transmit the frames to the given device and queue pair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint32_t auth_only_len;
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}
			if (unlikely(!ses->qp || ses->qp != qp)) {
				DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
					     ses->qp, qp);
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}

			auth_only_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
				if (is_auth_only(ses)) {
					cf = build_auth_only(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth(op, ses);
				} else if (is_proto_ipsec(ses)) {
					cf = build_proto(op, ses);
				} else {
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				if (is_auth_only(ses)) {
					cf = build_auth_only_sg(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only_sg(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth_sg(op, ses);
				} else {
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq;
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);
			/* Auth_only_len is set as 0 in the descriptor and
			 * is overwritten here in fd->cmd, which updates
			 * the DPOVRD register.
			 */
			if (auth_only_len)
				fd->cmd = 0x80000000 | auth_only_len;

		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

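/*
 * Each session owns one SEC input FQ taken from a fixed pool in the
 * device private data; dpaa_sec_attach_rxq()/dpaa_sec_detach_rxq() manage
 * that pool, and dpaa_sec_attach_sess_q() initializes the FQ so CAAM is
 * fed with the session's command block.
 */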
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	DPAA_SEC_WARN("All %x sessions in use", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qman_retire_fq(fq, NULL);
			qman_oos_fq(fq);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return -1;
	}
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}

static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
		return -EINVAL;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
		} else {
			DPAA_SEC_ERR("Not supported: Cipher then Auth with decrypt");
			return -EINVAL;
		}

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}
	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		DPAA_SEC_ERR("unable to attach sec queue");
		goto err1;
	}

	return 0;

err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));

	return -EINVAL;
}

static int
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);

	PMD_INIT_FUNC_TRACE();

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		if (s->inq)
			dpaa_sec_detach_rxq(qi, s->inq);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

1884 static int
1885 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
1886 			   struct rte_security_session_conf *conf,
1887 			   void *sess)
1888 {
1889 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1890 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1891 	struct rte_crypto_auth_xform *auth_xform;
1892 	struct rte_crypto_cipher_xform *cipher_xform;
1893 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
1894 
1895 	PMD_INIT_FUNC_TRACE();
1896 
1897 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1898 		cipher_xform = &conf->crypto_xform->cipher;
1899 		auth_xform = &conf->crypto_xform->next->auth;
1900 	} else {
1901 		auth_xform = &conf->crypto_xform->auth;
1902 		cipher_xform = &conf->crypto_xform->next->cipher;
1903 	}
1904 	session->proto_alg = conf->protocol;
1905 	session->cipher_key.data = rte_zmalloc(NULL,
1906 					       cipher_xform->key.length,
1907 					       RTE_CACHE_LINE_SIZE);
1908 	if (session->cipher_key.data == NULL &&
1909 			cipher_xform->key.length > 0) {
1910 		DPAA_SEC_ERR("No Memory for cipher key");
1911 		return -ENOMEM;
1912 	}
1913 
1914 	session->cipher_key.length = cipher_xform->key.length;
1915 	session->auth_key.data = rte_zmalloc(NULL,
1916 					auth_xform->key.length,
1917 					RTE_CACHE_LINE_SIZE);
1918 	if (session->auth_key.data == NULL &&
1919 			auth_xform->key.length > 0) {
1920 		DPAA_SEC_ERR("No Memory for auth key");
1921 		rte_free(session->cipher_key.data);
1922 		return -ENOMEM;
1923 	}
1924 	session->auth_key.length = auth_xform->key.length;
1925 	memcpy(session->cipher_key.data, cipher_xform->key.data,
1926 			cipher_xform->key.length);
1927 	memcpy(session->auth_key.data, auth_xform->key.data,
1928 			auth_xform->key.length);
1929 
1930 	switch (auth_xform->algo) {
1931 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
1932 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1933 		break;
1934 	case RTE_CRYPTO_AUTH_MD5_HMAC:
1935 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1936 		break;
1937 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
1938 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1939 		break;
1940 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
1941 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1942 		break;
1943 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
1944 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1945 		break;
1946 	case RTE_CRYPTO_AUTH_AES_CMAC:
1947 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1948 		break;
1949 	case RTE_CRYPTO_AUTH_NULL:
1950 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1951 		break;
1952 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
1953 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1954 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1955 	case RTE_CRYPTO_AUTH_SHA1:
1956 	case RTE_CRYPTO_AUTH_SHA256:
1957 	case RTE_CRYPTO_AUTH_SHA512:
1958 	case RTE_CRYPTO_AUTH_SHA224:
1959 	case RTE_CRYPTO_AUTH_SHA384:
1960 	case RTE_CRYPTO_AUTH_MD5:
1961 	case RTE_CRYPTO_AUTH_AES_GMAC:
1962 	case RTE_CRYPTO_AUTH_KASUMI_F9:
1963 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1964 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
1965 		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
1966 			auth_xform->algo);
1967 		goto out;
1968 	default:
1969 		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
1970 			auth_xform->algo);
1971 		goto out;
1972 	}
1973 
1974 	switch (cipher_xform->algo) {
1975 	case RTE_CRYPTO_CIPHER_AES_CBC:
1976 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1977 		break;
1978 	case RTE_CRYPTO_CIPHER_3DES_CBC:
1979 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1980 		break;
1981 	case RTE_CRYPTO_CIPHER_AES_CTR:
1982 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1983 		break;
1984 	case RTE_CRYPTO_CIPHER_NULL:
1985 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1986 	case RTE_CRYPTO_CIPHER_3DES_ECB:
1987 	case RTE_CRYPTO_CIPHER_AES_ECB:
1988 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
1989 		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1990 			cipher_xform->algo);
1991 		goto out;
1992 	default:
1993 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
1994 			cipher_xform->algo);
1995 		goto out;
1996 	}
1997 
1998 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1999 		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
2000 				sizeof(session->ip4_hdr));
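		/*
		 * Build the outer IPv4 tunnel header right behind the encap
		 * PDB (the memset above assumes ip4_hdr immediately follows
		 * encap_pdb in dpaa_sec_session). PDBOPTS_ESP_OIHI_PDB_INL
		 * below makes SEC prepend this inlined header to each
		 * encapsulated packet.
		 */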
2001 		session->ip4_hdr.ip_v = IPVERSION;
2002 		session->ip4_hdr.ip_hl = 5;
2003 		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2004 						sizeof(session->ip4_hdr));
2005 		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2006 		session->ip4_hdr.ip_id = 0;
2007 		session->ip4_hdr.ip_off = 0;
2008 		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2009 		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2010 				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
2011 				: IPPROTO_AH;
2012 		session->ip4_hdr.ip_sum = 0;
2013 		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2014 		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2015 		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2016 						(void *)&session->ip4_hdr,
2017 						sizeof(struct ip));
2018 
2019 		session->encap_pdb.options =
2020 			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2021 			PDBOPTS_ESP_OIHI_PDB_INL |
2022 			PDBOPTS_ESP_IVSRC |
2023 			PDBHMO_ESP_ENCAP_DTTL;
2024 		session->encap_pdb.spi = ipsec_xform->spi;
2025 		session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2026 
2027 		session->dir = DIR_ENC;
2028 	} else if (ipsec_xform->direction ==
2029 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
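		/*
		 * Encode the length of the outer IP header to strip on
		 * decapsulation in the upper 16 bits of the decap PDB
		 * options word.
		 */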
2030 		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2031 		session->decap_pdb.options = sizeof(struct ip) << 16;
2032 		session->dir = DIR_DEC;
	} else {
		goto out;
	}
2035 	session->ctx_pool = internals->ctx_pool;
2036 	session->inq = dpaa_sec_attach_rxq(internals);
2037 	if (session->inq == NULL) {
2038 		DPAA_SEC_ERR("unable to attach sec queue");
2039 		goto out;
2040 	}
2041 
2043 	return 0;
2044 out:
2045 	rte_free(session->auth_key.data);
2046 	rte_free(session->cipher_key.data);
2047 	memset(session, 0, sizeof(dpaa_sec_session));
	return -EINVAL;
2049 }
2050 
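/* rte_security session create: dispatch on the offload protocol and fill
 * the private session data taken from the application-provided mempool.
 */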
2051 static int
2052 dpaa_sec_security_session_create(void *dev,
2053 				 struct rte_security_session_conf *conf,
2054 				 struct rte_security_session *sess,
2055 				 struct rte_mempool *mempool)
2056 {
2057 	void *sess_private_data;
2058 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2059 	int ret;
2060 
2061 	if (rte_mempool_get(mempool, &sess_private_data)) {
2062 		DPAA_SEC_ERR("Couldn't get object from session mempool");
2063 		return -ENOMEM;
2064 	}
2065 
2066 	switch (conf->protocol) {
2067 	case RTE_SECURITY_PROTOCOL_IPSEC:
2068 		ret = dpaa_sec_set_ipsec_session(cdev, conf,
2069 				sess_private_data);
2070 		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EINVAL;
		break;
2075 	}
2076 	if (ret != 0) {
2077 		DPAA_SEC_ERR("failed to configure session parameters");
2078 		/* Return session to mempool */
2079 		rte_mempool_put(mempool, sess_private_data);
2080 		return ret;
2081 	}
2082 
2083 	set_sec_session_private_data(sess, sess_private_data);
2084 
2085 	return ret;
2086 }
2087 
2088 /** Clear the memory of session so it doesn't leave key material behind */
2089 static int
dpaa_sec_security_session_destroy(void *dev,
		struct rte_security_session *sess)
{
	struct dpaa_sec_dev_private *qi =
			((struct rte_cryptodev *)dev)->data->dev_private;
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	PMD_INIT_FUNC_TRACE();
2097 
	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		if (s->inq)
			dpaa_sec_detach_rxq(qi, s->inq);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
2104 		set_sec_session_private_data(sess, NULL);
2105 		rte_mempool_put(sess_mp, sess_priv);
2106 	}
2107 	return 0;
2108 }
2109 
2111 static int
2112 dpaa_sec_dev_configure(struct rte_cryptodev *dev,
2113 		       struct rte_cryptodev_config *config __rte_unused)
2114 {
	char str[20];
	struct dpaa_sec_dev_private *internals;

	PMD_INIT_FUNC_TRACE();

	internals = dev->data->dev_private;
	snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
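	/* The op-context pool is per device and persists across
	 * reconfigurations; it is only created on the first configure call.
	 */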
2123 	if (!internals->ctx_pool) {
2124 		internals->ctx_pool = rte_mempool_create((const char *)str,
2125 							CTX_POOL_NUM_BUFS,
2126 							CTX_POOL_BUF_SIZE,
2127 							CTX_POOL_CACHE_SIZE, 0,
2128 							NULL, NULL, NULL, NULL,
2129 							SOCKET_ID_ANY, 0);
2130 		if (!internals->ctx_pool) {
			DPAA_SEC_ERR("%s create failed", str);
2132 			return -ENOMEM;
2133 		}
	} else {
		DPAA_SEC_INFO("mempool already created for dev_id: %d",
				dev->data->dev_id);
	}
2137 
2138 	return 0;
2139 }
2140 
2141 static int
2142 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2143 {
2144 	PMD_INIT_FUNC_TRACE();
2145 	return 0;
2146 }
2147 
2148 static void
2149 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2150 {
2151 	PMD_INIT_FUNC_TRACE();
2152 }
2153 
2154 static int
2155 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2156 {
2157 	struct dpaa_sec_dev_private *internals;
2158 
2159 	PMD_INIT_FUNC_TRACE();
2160 
	if (dev == NULL)
		return -ENODEV;
2163 
2164 	internals = dev->data->dev_private;
2165 	rte_mempool_free(internals->ctx_pool);
2166 	internals->ctx_pool = NULL;
2167 
2168 	return 0;
2169 }
2170 
2171 static void
2172 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2173 		       struct rte_cryptodev_info *info)
2174 {
2175 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2176 
2177 	PMD_INIT_FUNC_TRACE();
2178 	if (info != NULL) {
2179 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2180 		info->feature_flags = dev->feature_flags;
2181 		info->capabilities = dpaa_sec_capabilities;
2182 		info->sym.max_nb_sessions = internals->max_nb_sessions;
2183 		info->driver_id = cryptodev_driver_id;
2184 	}
2185 }
2186 
2187 static struct rte_cryptodev_ops crypto_ops = {
2188 	.dev_configure	      = dpaa_sec_dev_configure,
2189 	.dev_start	      = dpaa_sec_dev_start,
2190 	.dev_stop	      = dpaa_sec_dev_stop,
2191 	.dev_close	      = dpaa_sec_dev_close,
2192 	.dev_infos_get        = dpaa_sec_dev_infos_get,
2193 	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
2194 	.queue_pair_release   = dpaa_sec_queue_pair_release,
2195 	.queue_pair_count     = dpaa_sec_queue_pair_count,
2196 	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
2197 	.sym_session_configure    = dpaa_sec_sym_session_configure,
2198 	.sym_session_clear        = dpaa_sec_sym_session_clear
2199 };
2200 
2201 static const struct rte_security_capability *
2202 dpaa_sec_capabilities_get(void *device __rte_unused)
2203 {
2204 	return dpaa_sec_security_cap;
2205 }
2206 
2207 struct rte_security_ops dpaa_sec_security_ops = {
2208 	.session_create = dpaa_sec_security_session_create,
2209 	.session_update = NULL,
2210 	.session_stats_get = NULL,
2211 	.session_destroy = dpaa_sec_security_session_destroy,
2212 	.set_pkt_metadata = NULL,
2213 	.capabilities_get = dpaa_sec_capabilities_get
2214 };
2215 
2216 static int
2217 dpaa_sec_uninit(struct rte_cryptodev *dev)
2218 {
2219 	struct dpaa_sec_dev_private *internals;
2220 
2221 	if (dev == NULL)
2222 		return -ENODEV;
2223 
2224 	internals = dev->data->dev_private;
2225 	rte_free(dev->security_ctx);
2226 
2227 	/* In case close has been called, internals->ctx_pool would be NULL */
2228 	rte_mempool_free(internals->ctx_pool);
2229 	rte_free(internals);
2230 
2231 	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
2232 		      dev->data->name, rte_socket_id());
2233 
2234 	return 0;
2235 }
2236 
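/* Per-device initialisation: install ops and feature flags, create the
 * rte_security context (primary process only), init one Tx FQ per queue
 * pair and pre-create the per-session Rx FQs.
 */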
2237 static int
2238 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2239 {
2240 	struct dpaa_sec_dev_private *internals;
2241 	struct rte_security_ctx *security_instance;
2242 	struct dpaa_sec_qp *qp;
2243 	uint32_t i, flags;
2244 	int ret;
2245 
2246 	PMD_INIT_FUNC_TRACE();
2247 
2248 	cryptodev->driver_id = cryptodev_driver_id;
2249 	cryptodev->dev_ops = &crypto_ops;
2250 
2251 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2252 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2253 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2254 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
2255 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2256 			RTE_CRYPTODEV_FF_SECURITY |
2257 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2258 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2259 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2260 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2261 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2262 
2263 	internals = cryptodev->data->dev_private;
2264 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2265 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2266 
2267 	/*
	 * For secondary processes, we don't initialise any further as the
	 * primary has already done this work.
2271 	 */
2272 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already initialised by primary process");
2274 		return 0;
2275 	}
2276 
2277 	/* Initialize security_ctx only for primary process*/
2278 	security_instance = rte_malloc("rte_security_instances_ops",
2279 				sizeof(struct rte_security_ctx), 0);
2280 	if (security_instance == NULL)
2281 		return -ENOMEM;
2282 	security_instance->device = (void *)cryptodev;
2283 	security_instance->ops = &dpaa_sec_security_ops;
2284 	security_instance->sess_cnt = 0;
2285 	cryptodev->security_ctx = security_instance;
2286 
2287 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2288 		/* init qman fq for queue pair */
2289 		qp = &internals->qps[i];
2290 		ret = dpaa_sec_init_tx(&qp->outq);
2291 		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d failed", i);
2293 			goto init_error;
2294 		}
2295 	}
2296 
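	/*
	 * Pre-create one Rx FQ per session slot: FQIDs are allocated
	 * dynamically and the FQs are consumed by the SEC hardware via
	 * its direct-connect portal rather than by a software dequeue.
	 */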
2297 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2298 		QMAN_FQ_FLAG_TO_DCPORTAL;
2299 	for (i = 0; i < internals->max_nb_sessions; i++) {
2300 		/* create rx qman fq for sessions*/
2301 		ret = qman_create_fq(0, flags, &internals->inq[i]);
2302 		if (unlikely(ret != 0)) {
2303 			DPAA_SEC_ERR("sec qman_create_fq failed");
2304 			goto init_error;
2305 		}
2306 	}
2307 
	DPAA_SEC_INFO("%s cryptodev init", cryptodev->data->name);
2309 	return 0;
2310 
2311 init_error:
	DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);
2313 
2314 	dpaa_sec_uninit(cryptodev);
2315 	return -EFAULT;
2316 }
2317 
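/* Bus probe: allocate a cryptodev, resolve the SEC era from the device
 * tree if it is not already known, then run the PMD init.
 */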
2318 static int
2319 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
2320 				struct rte_dpaa_device *dpaa_dev)
2321 {
2322 	struct rte_cryptodev *cryptodev;
2323 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2324 
2325 	int retval;
2326 
	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
		 dpaa_dev->id.dev_id);
2328 
2329 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2330 	if (cryptodev == NULL)
2331 		return -ENOMEM;
2332 
2333 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2334 		cryptodev->data->dev_private = rte_zmalloc_socket(
2335 					"cryptodev private structure",
2336 					sizeof(struct dpaa_sec_dev_private),
2337 					RTE_CACHE_LINE_SIZE,
2338 					rte_socket_id());
2339 
		if (cryptodev->data->dev_private == NULL) {
			DPAA_SEC_ERR("Cannot allocate memory for private device data");
			rte_cryptodev_pmd_release_device(cryptodev);
			return -ENOMEM;
		}
2343 	}
2344 
2345 	dpaa_dev->crypto_dev = cryptodev;
2346 	cryptodev->device = &dpaa_dev->device;
2347 	cryptodev->device->driver = &dpaa_drv->driver;
2348 
2349 	/* init user callbacks */
2350 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
2351 
	/* If the SEC era is not already configured, read it from the
	 * "fsl,sec-era" property of the CAAM node in the device tree.
	 */
2353 	if (!rta_get_sec_era()) {
2354 		const struct device_node *caam_node;
2355 
2356 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2357 			const uint32_t *prop = of_get_property(caam_node,
2358 					"fsl,sec-era",
2359 					NULL);
2360 			if (prop) {
2361 				rta_set_sec_era(
2362 					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
2363 				break;
2364 			}
2365 		}
2366 	}
2367 
2368 	/* Invoke PMD device initialization function */
2369 	retval = dpaa_sec_dev_init(cryptodev);
2370 	if (retval == 0)
2371 		return 0;
2372 
2373 	/* In case of error, cleanup is done */
2374 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2375 		rte_free(cryptodev->data->dev_private);
2376 
2377 	rte_cryptodev_pmd_release_device(cryptodev);
2378 
2379 	return -ENXIO;
2380 }
2381 
2382 static int
2383 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2384 {
2385 	struct rte_cryptodev *cryptodev;
2386 	int ret;
2387 
2388 	cryptodev = dpaa_dev->crypto_dev;
2389 	if (cryptodev == NULL)
2390 		return -ENODEV;
2391 
2392 	ret = dpaa_sec_uninit(cryptodev);
2393 	if (ret)
2394 		return ret;
2395 
2396 	return rte_cryptodev_pmd_destroy(cryptodev);
2397 }
2398 
2399 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
2400 	.drv_type = FSL_DPAA_CRYPTO,
2401 	.driver = {
2402 		.name = "DPAA SEC PMD"
2403 	},
2404 	.probe = cryptodev_dpaa_sec_probe,
2405 	.remove = cryptodev_dpaa_sec_remove,
2406 };
2407 
2408 static struct cryptodev_driver dpaa_sec_crypto_drv;
2409 
2410 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
2411 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
2412 		cryptodev_driver_id);
2413 
2414 RTE_INIT(dpaa_sec_init_log)
2415 {
2416 	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
2417 	if (dpaa_logtype_sec >= 0)
2418 		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
2419 }
2420