xref: /dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c (revision 2a7bb4fdf61e9edfb7adbaecb50e728b82da9e23)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2018 NXP
5  *
6  */
7 
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12 
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
23 #include <rte_mbuf.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
26 #include <rte_spinlock.h>
27 
28 #include <fsl_usd.h>
29 #include <fsl_qman.h>
30 #include <of.h>
31 
32 /* RTA header files */
33 #include <hw/desc/common.h>
34 #include <hw/desc/algo.h>
35 #include <hw/desc/ipsec.h>
36 #include <hw/desc/pdcp.h>
37 
38 #include <rte_dpaa_bus.h>
39 #include <dpaa_sec.h>
40 #include <dpaa_sec_log.h>
41 
42 enum rta_sec_era rta_sec_era;
43 
44 int dpaa_logtype_sec;
45 
46 static uint8_t cryptodev_driver_id;
47 
48 static __thread struct rte_crypto_op **dpaa_sec_ops;
49 static __thread int dpaa_sec_op_nb;
50 
51 static int
52 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
53 
54 static inline void
55 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
56 {
57 	if (!ctx->fd_status) {
58 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
59 	} else {
60 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
61 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
62 	}
63 
64 	/* report op status to sym->op and then free the ctx memory */
65 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
66 }
67 
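/*
 * Allocate a per-operation context from the session's ctx_pool and clear its
 * SG table with dcbz_64() (see the comment in the body below). The offset
 * between the context's virtual address and its mempool IOVA is cached in
 * vtop_offset, presumably so that addresses inside the context can later be
 * translated to IOVA by a simple subtraction where needed.
 */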
68 static inline struct dpaa_sec_op_ctx *
69 dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
70 {
71 	struct dpaa_sec_op_ctx *ctx;
72 	int retval;
73 
74 	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
75 	if (!ctx || retval) {
76 		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
77 		return NULL;
78 	}
79 	/*
80 	 * Clear SG memory. There are 16 SG entries of 16 Bytes each.
81 	 * one call to dcbz_64() clears 64 bytes, hence it is called 4 times
82 	 * to clear all the SG entries. Since dpaa_sec_alloc_ctx() is called
83 	 * for each packet, memset() would be costlier than dcbz_64().
84 	 */
85 	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
86 	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
87 	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
88 	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
89 
90 	ctx->ctx_pool = ses->ctx_pool;
91 	ctx->vtop_offset = (size_t) ctx
92 				- rte_mempool_virt2iova(ctx);
93 
94 	return ctx;
95 }
96 
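/*
 * Address translation helpers. dpaa_mem_vtop() looks up the memseg that
 * contains the virtual address to derive an IOVA for the SEC hardware, while
 * dpaa_mem_ptov() first tries the fast dpaax IOVA table and only falls back
 * to the slower rte_mem_iova2virt() lookup when no entry is found.
 */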
97 static inline rte_iova_t
98 dpaa_mem_vtop(void *vaddr)
99 {
100 	const struct rte_memseg *ms;
101 
102 	ms = rte_mem_virt2memseg(vaddr, NULL);
103 	if (ms)
104 		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
105 	return (size_t)NULL;
106 }
107 
108 static inline void *
109 dpaa_mem_ptov(rte_iova_t paddr)
110 {
111 	void *va;
112 
113 	va = (void *)dpaax_iova_table_get_va(paddr);
114 	if (likely(va))
115 		return va;
116 
117 	return rte_mem_iova2virt(paddr);
118 }
119 
120 static void
121 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
122 		   struct qman_fq *fq,
123 		   const struct qm_mr_entry *msg)
124 {
125 	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
126 			fq->fqid, msg->ern.rc, msg->ern.seqnum);
127 }
128 
129 /* Initialize the queue with the CAAM channel as the destination channel so
130  * that all packets enqueued on this queue are dispatched to CAAM.
131  */
132 static int
133 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
134 		 uint32_t fqid_out)
135 {
136 	struct qm_mcc_initfq fq_opts;
137 	uint32_t flags;
138 	int ret = -1;
139 
140 	/* Clear FQ options */
141 	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
142 
143 	flags = QMAN_INITFQ_FLAG_SCHED;
144 	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
145 			  QM_INITFQ_WE_CONTEXTB;
146 
147 	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
148 	fq_opts.fqd.context_b = fqid_out;
149 	fq_opts.fqd.dest.channel = qm_channel_caam;
150 	fq_opts.fqd.dest.wq = 0;
151 
152 	fq_in->cb.ern  = ern_sec_fq_handler;
153 
154 	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
155 
156 	ret = qman_init_fq(fq_in, flags, &fq_opts);
157 	if (unlikely(ret != 0))
158 		DPAA_SEC_ERR("qman_init_fq failed %d", ret);
159 
160 	return ret;
161 }
162 
163 /* Requests are enqueued on in_fq and CAAM puts the crypto result into out_fq */
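/*
 * DQRR callback for the out (result) queue: each valid frame descriptor
 * points to a dpaa_sec_job embedded in a dpaa_sec_op_ctx. The completed op
 * is collected in the per-thread dpaa_sec_ops[] array; once DPAA_SEC_BURST
 * entries have been gathered, further dequeues are deferred. For security
 * (protocol offload) sessions the output length reported in the first SG
 * entry is copied back into the source mbuf.
 */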
164 static enum qman_cb_dqrr_result
165 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
166 		  struct qman_fq *fq __always_unused,
167 		  const struct qm_dqrr_entry *dqrr)
168 {
169 	const struct qm_fd *fd;
170 	struct dpaa_sec_job *job;
171 	struct dpaa_sec_op_ctx *ctx;
172 
173 	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
174 		return qman_cb_dqrr_defer;
175 
176 	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
177 		return qman_cb_dqrr_consume;
178 
179 	fd = &dqrr->fd;
180 	/* sg is embedded in an op ctx,
181 	 * sg[0] is for output
182 	 * sg[1] is for input
183 	 */
184 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
185 
186 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
187 	ctx->fd_status = fd->status;
188 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
189 		struct qm_sg_entry *sg_out;
190 		uint32_t len;
191 
192 		sg_out = &job->sg[0];
193 		hw_sg_to_cpu(sg_out);
194 		len = sg_out->length;
195 		ctx->op->sym->m_src->pkt_len = len;
196 		ctx->op->sym->m_src->data_len = len;
197 	}
198 	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
199 	dpaa_sec_op_ending(ctx);
200 
201 	return qman_cb_dqrr_consume;
202 }
203 
204 /* CAAM results are put into this queue */
205 static int
206 dpaa_sec_init_tx(struct qman_fq *fq)
207 {
208 	int ret;
209 	struct qm_mcc_initfq opts;
210 	uint32_t flags;
211 
212 	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
213 		QMAN_FQ_FLAG_DYNAMIC_FQID;
214 
215 	ret = qman_create_fq(0, flags, fq);
216 	if (unlikely(ret)) {
217 		DPAA_SEC_ERR("qman_create_fq failed");
218 		return ret;
219 	}
220 
221 	memset(&opts, 0, sizeof(opts));
222 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
223 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
224 
225 	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
226 
227 	fq->cb.dqrr = dqrr_out_fq_cb_rx;
228 	fq->cb.ern  = ern_sec_fq_handler;
229 
230 	ret = qman_init_fq(fq, 0, &opts);
231 	if (unlikely(ret)) {
232 		DPAA_SEC_ERR("unable to init caam source fq!");
233 		return ret;
234 	}
235 
236 	return ret;
237 }
238 
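/* Session classification helpers: the checks below decide which shared
 * descriptor and which frame-build path a session uses.
 */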
239 static inline int is_cipher_only(dpaa_sec_session *ses)
240 {
241 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
242 		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
243 }
244 
245 static inline int is_auth_only(dpaa_sec_session *ses)
246 {
247 	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
248 		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
249 }
250 
251 static inline int is_aead(dpaa_sec_session *ses)
252 {
253 	return ((ses->cipher_alg == 0) &&
254 		(ses->auth_alg == 0) &&
255 		(ses->aead_alg != 0));
256 }
257 
258 static inline int is_auth_cipher(dpaa_sec_session *ses)
259 {
260 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
261 		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
262 		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
263 }
264 
265 static inline int is_proto_ipsec(dpaa_sec_session *ses)
266 {
267 	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
268 }
269 
270 static inline int is_proto_pdcp(dpaa_sec_session *ses)
271 {
272 	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);
273 }
274 
275 static inline int is_encode(dpaa_sec_session *ses)
276 {
277 	return ses->dir == DIR_ENC;
278 }
279 
280 static inline int is_decode(dpaa_sec_session *ses)
281 {
282 	return ses->dir == DIR_DEC;
283 }
284 
285 static inline void
286 caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
287 {
288 	switch (ses->auth_alg) {
289 	case RTE_CRYPTO_AUTH_NULL:
290 		alginfo_a->algtype =
291 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
292 			OP_PCL_IPSEC_HMAC_NULL : 0;
293 		ses->digest_length = 0;
294 		break;
295 	case RTE_CRYPTO_AUTH_MD5_HMAC:
296 		alginfo_a->algtype =
297 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
298 			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
299 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
300 		break;
301 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
302 		alginfo_a->algtype =
303 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
304 			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
305 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
306 		break;
307 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
308 		alginfo_a->algtype =
309 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
310 			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
311 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
312 		break;
313 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
314 		alginfo_a->algtype =
315 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
316 			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
317 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
318 		break;
319 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
320 		alginfo_a->algtype =
321 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
322 			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
323 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
324 		break;
325 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
326 		alginfo_a->algtype =
327 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
328 			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
329 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
330 		break;
331 	default:
332 		DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
333 	}
334 }
335 
336 static inline void
337 caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
338 {
339 	switch (ses->cipher_alg) {
340 	case RTE_CRYPTO_CIPHER_NULL:
341 		alginfo_c->algtype =
342 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
343 			OP_PCL_IPSEC_NULL : 0;
344 		break;
345 	case RTE_CRYPTO_CIPHER_AES_CBC:
346 		alginfo_c->algtype =
347 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
348 			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
349 		alginfo_c->algmode = OP_ALG_AAI_CBC;
350 		break;
351 	case RTE_CRYPTO_CIPHER_3DES_CBC:
352 		alginfo_c->algtype =
353 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
354 			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
355 		alginfo_c->algmode = OP_ALG_AAI_CBC;
356 		break;
357 	case RTE_CRYPTO_CIPHER_AES_CTR:
358 		alginfo_c->algtype =
359 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
360 			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
361 		alginfo_c->algmode = OP_ALG_AAI_CTR;
362 		break;
363 	default:
364 		DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
365 	}
366 }
367 
368 static inline void
369 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
370 {
371 	switch (ses->aead_alg) {
372 	case RTE_CRYPTO_AEAD_AES_GCM:
373 		alginfo->algtype = OP_ALG_ALGSEL_AES;
374 		alginfo->algmode = OP_ALG_AAI_GCM;
375 		break;
376 	default:
377 		DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
378 	}
379 }
380 
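/*
 * Build the PDCP shared descriptor. The key lengths are first written into
 * sh_desc[0]/sh_desc[1] so that rta_inline_query() can report, via bits in
 * sh_desc[2], whether each key fits inline in the descriptor; keys that do
 * not fit are referenced by pointer (RTA_DATA_PTR) instead of being copied
 * immediate (RTA_DATA_IMM). Control-plane sessions use the c_plane
 * encap/decap constructors, user-plane sessions the u_plane ones.
 */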
381 static int
382 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
383 {
384 	struct alginfo authdata = {0}, cipherdata = {0};
385 	struct sec_cdb *cdb = &ses->cdb;
386 	int32_t shared_desc_len = 0;
387 	int err;
388 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
389 	int swap = false;
390 #else
391 	int swap = true;
392 #endif
393 
394 	switch (ses->cipher_alg) {
395 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
396 		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
397 		break;
398 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
399 		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
400 		break;
401 	case RTE_CRYPTO_CIPHER_AES_CTR:
402 		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
403 		break;
404 	case RTE_CRYPTO_CIPHER_NULL:
405 		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
406 		break;
407 	default:
408 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
409 			      ses->cipher_alg);
410 		return -1;
411 	}
412 
413 	cipherdata.key = (size_t)ses->cipher_key.data;
414 	cipherdata.keylen = ses->cipher_key.length;
415 	cipherdata.key_enc_flags = 0;
416 	cipherdata.key_type = RTA_DATA_IMM;
417 
418 	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
419 		switch (ses->auth_alg) {
420 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
421 			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
422 			break;
423 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
424 			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
425 			break;
426 		case RTE_CRYPTO_AUTH_AES_CMAC:
427 			authdata.algtype = PDCP_AUTH_TYPE_AES;
428 			break;
429 		case RTE_CRYPTO_AUTH_NULL:
430 			authdata.algtype = PDCP_AUTH_TYPE_NULL;
431 			break;
432 		default:
433 			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
434 				      ses->auth_alg);
435 			return -1;
436 		}
437 
438 		authdata.key = (size_t)ses->auth_key.data;
439 		authdata.keylen = ses->auth_key.length;
440 		authdata.key_enc_flags = 0;
441 		authdata.key_type = RTA_DATA_IMM;
442 
443 		cdb->sh_desc[0] = cipherdata.keylen;
444 		cdb->sh_desc[1] = authdata.keylen;
445 		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
446 				       MIN_JOB_DESC_SIZE,
447 				       (unsigned int *)cdb->sh_desc,
448 				       &cdb->sh_desc[2], 2);
449 
450 		if (err < 0) {
451 			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
452 			return err;
453 		}
454 		if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
455 			cipherdata.key = (size_t)dpaa_mem_vtop(
456 						(void *)(size_t)cipherdata.key);
457 			cipherdata.key_type = RTA_DATA_PTR;
458 		}
459 		if (!(cdb->sh_desc[2] & (1<<1)) &&  authdata.keylen) {
460 			authdata.key = (size_t)dpaa_mem_vtop(
461 						(void *)(size_t)authdata.key);
462 			authdata.key_type = RTA_DATA_PTR;
463 		}
464 
465 		cdb->sh_desc[0] = 0;
466 		cdb->sh_desc[1] = 0;
467 		cdb->sh_desc[2] = 0;
468 
469 		if (ses->dir == DIR_ENC)
470 			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
471 					cdb->sh_desc, 1, swap,
472 					ses->pdcp.hfn,
473 					ses->pdcp.bearer,
474 					ses->pdcp.pkt_dir,
475 					ses->pdcp.hfn_threshold,
476 					&cipherdata, &authdata,
477 					0);
478 		else if (ses->dir == DIR_DEC)
479 			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
480 					cdb->sh_desc, 1, swap,
481 					ses->pdcp.hfn,
482 					ses->pdcp.bearer,
483 					ses->pdcp.pkt_dir,
484 					ses->pdcp.hfn_threshold,
485 					&cipherdata, &authdata,
486 					0);
487 	} else {
488 		cdb->sh_desc[0] = cipherdata.keylen;
489 		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
490 				       MIN_JOB_DESC_SIZE,
491 				       (unsigned int *)cdb->sh_desc,
492 				       &cdb->sh_desc[2], 1);
493 
494 		if (err < 0) {
495 			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
496 			return err;
497 		}
498 		if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
499 			cipherdata.key = (size_t)dpaa_mem_vtop(
500 						(void *)(size_t)cipherdata.key);
501 			cipherdata.key_type = RTA_DATA_PTR;
502 		}
503 		cdb->sh_desc[0] = 0;
504 		cdb->sh_desc[1] = 0;
505 		cdb->sh_desc[2] = 0;
506 
507 		if (ses->dir == DIR_ENC)
508 			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
509 					cdb->sh_desc, 1, swap,
510 					ses->pdcp.sn_size,
511 					ses->pdcp.hfn,
512 					ses->pdcp.bearer,
513 					ses->pdcp.pkt_dir,
514 					ses->pdcp.hfn_threshold,
515 					&cipherdata, 0);
516 		else if (ses->dir == DIR_DEC)
517 			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
518 					cdb->sh_desc, 1, swap,
519 					ses->pdcp.sn_size,
520 					ses->pdcp.hfn,
521 					ses->pdcp.bearer,
522 					ses->pdcp.pkt_dir,
523 					ses->pdcp.hfn_threshold,
524 					&cipherdata, 0);
525 	}
526 
527 	return shared_desc_len;
528 }
529 
530 /* prepare ipsec proto command block of the session */
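/* The same rta_inline_query() scheme as in the PDCP path above is used here
 * to decide whether the cipher and auth keys are passed immediate or by
 * pointer before calling cnstr_shdsc_ipsec_new_encap()/_decap().
 */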
531 static int
532 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
533 {
534 	struct alginfo cipherdata = {0}, authdata = {0};
535 	struct sec_cdb *cdb = &ses->cdb;
536 	int32_t shared_desc_len = 0;
537 	int err;
538 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
539 	int swap = false;
540 #else
541 	int swap = true;
542 #endif
543 
544 	caam_cipher_alg(ses, &cipherdata);
545 	if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
546 		DPAA_SEC_ERR("not supported cipher alg");
547 		return -ENOTSUP;
548 	}
549 
550 	cipherdata.key = (size_t)ses->cipher_key.data;
551 	cipherdata.keylen = ses->cipher_key.length;
552 	cipherdata.key_enc_flags = 0;
553 	cipherdata.key_type = RTA_DATA_IMM;
554 
555 	caam_auth_alg(ses, &authdata);
556 	if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
557 		DPAA_SEC_ERR("not supported auth alg");
558 		return -ENOTSUP;
559 	}
560 
561 	authdata.key = (size_t)ses->auth_key.data;
562 	authdata.keylen = ses->auth_key.length;
563 	authdata.key_enc_flags = 0;
564 	authdata.key_type = RTA_DATA_IMM;
565 
566 	cdb->sh_desc[0] = cipherdata.keylen;
567 	cdb->sh_desc[1] = authdata.keylen;
568 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
569 			       MIN_JOB_DESC_SIZE,
570 			       (unsigned int *)cdb->sh_desc,
571 			       &cdb->sh_desc[2], 2);
572 
573 	if (err < 0) {
574 		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
575 		return err;
576 	}
577 	if (cdb->sh_desc[2] & 1)
578 		cipherdata.key_type = RTA_DATA_IMM;
579 	else {
580 		cipherdata.key = (size_t)dpaa_mem_vtop(
581 					(void *)(size_t)cipherdata.key);
582 		cipherdata.key_type = RTA_DATA_PTR;
583 	}
584 	if (cdb->sh_desc[2] & (1<<1))
585 		authdata.key_type = RTA_DATA_IMM;
586 	else {
587 		authdata.key = (size_t)dpaa_mem_vtop(
588 					(void *)(size_t)authdata.key);
589 		authdata.key_type = RTA_DATA_PTR;
590 	}
591 
592 	cdb->sh_desc[0] = 0;
593 	cdb->sh_desc[1] = 0;
594 	cdb->sh_desc[2] = 0;
595 	if (ses->dir == DIR_ENC) {
596 		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
597 				cdb->sh_desc,
598 				true, swap, SHR_SERIAL,
599 				&ses->encap_pdb,
600 				(uint8_t *)&ses->ip4_hdr,
601 				&cipherdata, &authdata);
602 	} else if (ses->dir == DIR_DEC) {
603 		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
604 				cdb->sh_desc,
605 				true, swap, SHR_SERIAL,
606 				&ses->decap_pdb,
607 				&cipherdata, &authdata);
608 	}
609 	return shared_desc_len;
610 }
611 
612 /* prepare command block of the session */
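/* Dispatches to the protocol-offload (IPsec/PDCP) builders above, or builds
 * a plain cipher-only, auth-only, AEAD (GCM) or cipher+auth shared descriptor
 * via the cnstr_shdsc_*() helpers. On success the shared-descriptor header
 * words are converted to big-endian byte order before being handed to SEC.
 */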
613 static int
614 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
615 {
616 	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
617 	int32_t shared_desc_len = 0;
618 	struct sec_cdb *cdb = &ses->cdb;
619 	int err;
620 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
621 	int swap = false;
622 #else
623 	int swap = true;
624 #endif
625 
626 	memset(cdb, 0, sizeof(struct sec_cdb));
627 
628 	if (is_proto_ipsec(ses)) {
629 		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
630 	} else if (is_proto_pdcp(ses)) {
631 		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
632 	} else if (is_cipher_only(ses)) {
633 		caam_cipher_alg(ses, &alginfo_c);
634 		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
635 			DPAA_SEC_ERR("not supported cipher alg");
636 			return -ENOTSUP;
637 		}
638 
639 		alginfo_c.key = (size_t)ses->cipher_key.data;
640 		alginfo_c.keylen = ses->cipher_key.length;
641 		alginfo_c.key_enc_flags = 0;
642 		alginfo_c.key_type = RTA_DATA_IMM;
643 
644 		shared_desc_len = cnstr_shdsc_blkcipher(
645 						cdb->sh_desc, true,
646 						swap, &alginfo_c,
647 						NULL,
648 						ses->iv.length,
649 						ses->dir);
650 	} else if (is_auth_only(ses)) {
651 		caam_auth_alg(ses, &alginfo_a);
652 		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
653 			DPAA_SEC_ERR("not supported auth alg");
654 			return -ENOTSUP;
655 		}
656 
657 		alginfo_a.key = (size_t)ses->auth_key.data;
658 		alginfo_a.keylen = ses->auth_key.length;
659 		alginfo_a.key_enc_flags = 0;
660 		alginfo_a.key_type = RTA_DATA_IMM;
661 
662 		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
663 						   swap, &alginfo_a,
664 						   !ses->dir,
665 						   ses->digest_length);
666 	} else if (is_aead(ses)) {
667 		caam_aead_alg(ses, &alginfo);
668 		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
669 			DPAA_SEC_ERR("not supported aead alg");
670 			return -ENOTSUP;
671 		}
672 		alginfo.key = (size_t)ses->aead_key.data;
673 		alginfo.keylen = ses->aead_key.length;
674 		alginfo.key_enc_flags = 0;
675 		alginfo.key_type = RTA_DATA_IMM;
676 
677 		if (ses->dir == DIR_ENC)
678 			shared_desc_len = cnstr_shdsc_gcm_encap(
679 					cdb->sh_desc, true, swap,
680 					&alginfo,
681 					ses->iv.length,
682 					ses->digest_length);
683 		else
684 			shared_desc_len = cnstr_shdsc_gcm_decap(
685 					cdb->sh_desc, true, swap,
686 					&alginfo,
687 					ses->iv.length,
688 					ses->digest_length);
689 	} else {
690 		caam_cipher_alg(ses, &alginfo_c);
691 		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
692 			DPAA_SEC_ERR("not supported cipher alg");
693 			return -ENOTSUP;
694 		}
695 
696 		alginfo_c.key = (size_t)ses->cipher_key.data;
697 		alginfo_c.keylen = ses->cipher_key.length;
698 		alginfo_c.key_enc_flags = 0;
699 		alginfo_c.key_type = RTA_DATA_IMM;
700 
701 		caam_auth_alg(ses, &alginfo_a);
702 		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
703 			DPAA_SEC_ERR("not supported auth alg");
704 			return -ENOTSUP;
705 		}
706 
707 		alginfo_a.key = (size_t)ses->auth_key.data;
708 		alginfo_a.keylen = ses->auth_key.length;
709 		alginfo_a.key_enc_flags = 0;
710 		alginfo_a.key_type = RTA_DATA_IMM;
711 
712 		cdb->sh_desc[0] = alginfo_c.keylen;
713 		cdb->sh_desc[1] = alginfo_a.keylen;
714 		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
715 				       MIN_JOB_DESC_SIZE,
716 				       (unsigned int *)cdb->sh_desc,
717 				       &cdb->sh_desc[2], 2);
718 
719 		if (err < 0) {
720 			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
721 			return err;
722 		}
723 		if (cdb->sh_desc[2] & 1)
724 			alginfo_c.key_type = RTA_DATA_IMM;
725 		else {
726 			alginfo_c.key = (size_t)dpaa_mem_vtop(
727 						(void *)(size_t)alginfo_c.key);
728 			alginfo_c.key_type = RTA_DATA_PTR;
729 		}
730 		if (cdb->sh_desc[2] & (1<<1))
731 			alginfo_a.key_type = RTA_DATA_IMM;
732 		else {
733 			alginfo_a.key = (size_t)dpaa_mem_vtop(
734 						(void *)(size_t)alginfo_a.key);
735 			alginfo_a.key_type = RTA_DATA_PTR;
736 		}
737 		cdb->sh_desc[0] = 0;
738 		cdb->sh_desc[1] = 0;
739 		cdb->sh_desc[2] = 0;
740 		/* Auth_only_len is set as 0 here and it will be
741 		 * overwritten in fd for each packet.
742 		 */
743 		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
744 				true, swap, &alginfo_c, &alginfo_a,
745 				ses->iv.length, 0,
746 				ses->digest_length, ses->dir);
747 	}
748 
749 	if (shared_desc_len < 0) {
750 		DPAA_SEC_ERR("error in preparing command block");
751 		return shared_desc_len;
752 	}
753 
754 	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
755 	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
756 	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
757 
758 	return 0;
759 }
760 
761 /* qp is lockless, should be accessed by only one thread */
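/* Pull-mode dequeue: a volatile dequeue command (VDQCR) is issued for up to
 * nb_ops frames and the DQRR entries are drained until the VDQCR state
 * clears. Completed ops are returned to the caller and their contexts are
 * released back to the mempool.
 */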
762 static int
763 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
764 {
765 	struct qman_fq *fq;
766 	unsigned int pkts = 0;
767 	int num_rx_bufs, ret;
768 	struct qm_dqrr_entry *dq;
769 	uint32_t vdqcr_flags = 0;
770 
771 	fq = &qp->outq;
772 	/*
773 	 * For requests of fewer than four buffers, we ask for the exact
774 	 * number of buffers. Otherwise we do not set the QM_VDQCR_EXACT flag;
775 	 * without that flag QMan may provide up to two more buffers than
776 	 * requested, so we request two less in this case.
777 	 */
778 	if (nb_ops < 4) {
779 		vdqcr_flags = QM_VDQCR_EXACT;
780 		num_rx_bufs = nb_ops;
781 	} else {
782 		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
783 			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
784 	}
785 	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
786 	if (ret)
787 		return 0;
788 
789 	do {
790 		const struct qm_fd *fd;
791 		struct dpaa_sec_job *job;
792 		struct dpaa_sec_op_ctx *ctx;
793 		struct rte_crypto_op *op;
794 
795 		dq = qman_dequeue(fq);
796 		if (!dq)
797 			continue;
798 
799 		fd = &dq->fd;
800 		/* sg is embedded in an op ctx,
801 		 * sg[0] is for output
802 		 * sg[1] is for input
803 		 */
804 		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
805 
806 		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
807 		ctx->fd_status = fd->status;
808 		op = ctx->op;
809 		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
810 			struct qm_sg_entry *sg_out;
811 			uint32_t len;
812 
813 			sg_out = &job->sg[0];
814 			hw_sg_to_cpu(sg_out);
815 			len = sg_out->length;
816 			op->sym->m_src->pkt_len = len;
817 			op->sym->m_src->data_len = len;
818 		}
819 		if (!ctx->fd_status) {
820 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
821 		} else {
822 			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
823 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
824 		}
825 		ops[pkts++] = op;
826 
827 		/* report op status to sym->op and then free the ctx memory */
828 		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
829 
830 		qman_dqrr_consume(fq, dq);
831 	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
832 
833 	return pkts;
834 }
835 
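/*
 * Scatter-gather auth-only case. A sketch of the compound frame convention
 * used by the build_*_sg() helpers below:
 *
 *   cf->sg[0]   output entry (here: the digest)
 *   cf->sg[1]   input entry, marked as an extension pointing to cf->sg[2..]
 *   cf->sg[2..] the actual data entries (and, for verification, the copy of
 *               the received digest saved in ctx->digest)
 */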
836 static inline struct dpaa_sec_job *
837 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
838 {
839 	struct rte_crypto_sym_op *sym = op->sym;
840 	struct rte_mbuf *mbuf = sym->m_src;
841 	struct dpaa_sec_job *cf;
842 	struct dpaa_sec_op_ctx *ctx;
843 	struct qm_sg_entry *sg, *out_sg, *in_sg;
844 	phys_addr_t start_addr;
845 	uint8_t *old_digest, extra_segs;
846 
847 	if (is_decode(ses))
848 		extra_segs = 3;
849 	else
850 		extra_segs = 2;
851 
852 	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
853 		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
854 				MAX_SG_ENTRIES);
855 		return NULL;
856 	}
857 	ctx = dpaa_sec_alloc_ctx(ses);
858 	if (!ctx)
859 		return NULL;
860 
861 	cf = &ctx->job;
862 	ctx->op = op;
863 	old_digest = ctx->digest;
864 
865 	/* output */
866 	out_sg = &cf->sg[0];
867 	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
868 	out_sg->length = ses->digest_length;
869 	cpu_to_hw_sg(out_sg);
870 
871 	/* input */
872 	in_sg = &cf->sg[1];
873 	/* need to extend the input to a compound frame */
874 	in_sg->extension = 1;
875 	in_sg->final = 1;
876 	in_sg->length = sym->auth.data.length;
877 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
878 
879 	/* 1st seg */
880 	sg = in_sg + 1;
881 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
882 	sg->length = mbuf->data_len - sym->auth.data.offset;
883 	sg->offset = sym->auth.data.offset;
884 
885 	/* Successive segs */
886 	mbuf = mbuf->next;
887 	while (mbuf) {
888 		cpu_to_hw_sg(sg);
889 		sg++;
890 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
891 		sg->length = mbuf->data_len;
892 		mbuf = mbuf->next;
893 	}
894 
895 	if (is_decode(ses)) {
896 		/* Digest verification case */
897 		cpu_to_hw_sg(sg);
898 		sg++;
899 		rte_memcpy(old_digest, sym->auth.digest.data,
900 				ses->digest_length);
901 		start_addr = dpaa_mem_vtop(old_digest);
902 		qm_sg_entry_set64(sg, start_addr);
903 		sg->length = ses->digest_length;
904 		in_sg->length += ses->digest_length;
905 	} else {
906 		/* Digest calculation case */
907 		sg->length -= ses->digest_length;
908 	}
909 	sg->final = 1;
910 	cpu_to_hw_sg(sg);
911 	cpu_to_hw_sg(in_sg);
912 
913 	return cf;
914 }
915 
916 /**
917  * packet looks like:
918  *		|<----data_len------->|
919  *    |ip_header|ah_header|icv|payload|
920  *              ^
921  *		|
922  *	   mbuf->pkt.data
923  */
924 static inline struct dpaa_sec_job *
925 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
926 {
927 	struct rte_crypto_sym_op *sym = op->sym;
928 	struct rte_mbuf *mbuf = sym->m_src;
929 	struct dpaa_sec_job *cf;
930 	struct dpaa_sec_op_ctx *ctx;
931 	struct qm_sg_entry *sg;
932 	rte_iova_t start_addr;
933 	uint8_t *old_digest;
934 
935 	ctx = dpaa_sec_alloc_ctx(ses);
936 	if (!ctx)
937 		return NULL;
938 
939 	cf = &ctx->job;
940 	ctx->op = op;
941 	old_digest = ctx->digest;
942 
943 	start_addr = rte_pktmbuf_iova(mbuf);
944 	/* output */
945 	sg = &cf->sg[0];
946 	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
947 	sg->length = ses->digest_length;
948 	cpu_to_hw_sg(sg);
949 
950 	/* input */
951 	sg = &cf->sg[1];
952 	if (is_decode(ses)) {
953 		/* need to extend the input to a compound frame */
954 		sg->extension = 1;
955 		qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
956 		sg->length = sym->auth.data.length + ses->digest_length;
957 		sg->final = 1;
958 		cpu_to_hw_sg(sg);
959 
960 		sg = &cf->sg[2];
961 		/* hash result or digest, save digest first */
962 		rte_memcpy(old_digest, sym->auth.digest.data,
963 			   ses->digest_length);
964 		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
965 		sg->length = sym->auth.data.length;
966 		cpu_to_hw_sg(sg);
967 
968 		/* let the hardware verify the digest */
969 		start_addr = dpaa_mem_vtop(old_digest);
970 		sg++;
971 		qm_sg_entry_set64(sg, start_addr);
972 		sg->length = ses->digest_length;
973 		sg->final = 1;
974 		cpu_to_hw_sg(sg);
975 	} else {
976 		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
977 		sg->length = sym->auth.data.length;
978 		sg->final = 1;
979 		cpu_to_hw_sg(sg);
980 	}
981 
982 	return cf;
983 }
984 
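/* Scatter-gather cipher-only case: the output SG table covers the
 * destination segments, while the input SG table carries the IV followed by
 * the source segments.
 */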
985 static inline struct dpaa_sec_job *
986 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
987 {
988 	struct rte_crypto_sym_op *sym = op->sym;
989 	struct dpaa_sec_job *cf;
990 	struct dpaa_sec_op_ctx *ctx;
991 	struct qm_sg_entry *sg, *out_sg, *in_sg;
992 	struct rte_mbuf *mbuf;
993 	uint8_t req_segs;
994 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
995 			ses->iv.offset);
996 
997 	if (sym->m_dst) {
998 		mbuf = sym->m_dst;
999 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
1000 	} else {
1001 		mbuf = sym->m_src;
1002 		req_segs = mbuf->nb_segs * 2 + 3;
1003 	}
1004 
1005 	if (req_segs > MAX_SG_ENTRIES) {
1006 		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
1007 				MAX_SG_ENTRIES);
1008 		return NULL;
1009 	}
1010 
1011 	ctx = dpaa_sec_alloc_ctx(ses);
1012 	if (!ctx)
1013 		return NULL;
1014 
1015 	cf = &ctx->job;
1016 	ctx->op = op;
1017 
1018 	/* output */
1019 	out_sg = &cf->sg[0];
1020 	out_sg->extension = 1;
1021 	out_sg->length = sym->cipher.data.length;
1022 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
1023 	cpu_to_hw_sg(out_sg);
1024 
1025 	/* 1st seg */
1026 	sg = &cf->sg[2];
1027 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1028 	sg->length = mbuf->data_len - sym->cipher.data.offset;
1029 	sg->offset = sym->cipher.data.offset;
1030 
1031 	/* Successive segs */
1032 	mbuf = mbuf->next;
1033 	while (mbuf) {
1034 		cpu_to_hw_sg(sg);
1035 		sg++;
1036 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1037 		sg->length = mbuf->data_len;
1038 		mbuf = mbuf->next;
1039 	}
1040 	sg->final = 1;
1041 	cpu_to_hw_sg(sg);
1042 
1043 	/* input */
1044 	mbuf = sym->m_src;
1045 	in_sg = &cf->sg[1];
1046 	in_sg->extension = 1;
1047 	in_sg->final = 1;
1048 	in_sg->length = sym->cipher.data.length + ses->iv.length;
1049 
1050 	sg++;
1051 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1052 	cpu_to_hw_sg(in_sg);
1053 
1054 	/* IV */
1055 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1056 	sg->length = ses->iv.length;
1057 	cpu_to_hw_sg(sg);
1058 
1059 	/* 1st seg */
1060 	sg++;
1061 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1062 	sg->length = mbuf->data_len - sym->cipher.data.offset;
1063 	sg->offset = sym->cipher.data.offset;
1064 
1065 	/* Successive segs */
1066 	mbuf = mbuf->next;
1067 	while (mbuf) {
1068 		cpu_to_hw_sg(sg);
1069 		sg++;
1070 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1071 		sg->length = mbuf->data_len;
1072 		mbuf = mbuf->next;
1073 	}
1074 	sg->final = 1;
1075 	cpu_to_hw_sg(sg);
1076 
1077 	return cf;
1078 }
1079 
1080 static inline struct dpaa_sec_job *
1081 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1082 {
1083 	struct rte_crypto_sym_op *sym = op->sym;
1084 	struct dpaa_sec_job *cf;
1085 	struct dpaa_sec_op_ctx *ctx;
1086 	struct qm_sg_entry *sg;
1087 	rte_iova_t src_start_addr, dst_start_addr;
1088 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1089 			ses->iv.offset);
1090 
1091 	ctx = dpaa_sec_alloc_ctx(ses);
1092 	if (!ctx)
1093 		return NULL;
1094 
1095 	cf = &ctx->job;
1096 	ctx->op = op;
1097 
1098 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
1099 
1100 	if (sym->m_dst)
1101 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1102 	else
1103 		dst_start_addr = src_start_addr;
1104 
1105 	/* output */
1106 	sg = &cf->sg[0];
1107 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1108 	sg->length = sym->cipher.data.length + ses->iv.length;
1109 	cpu_to_hw_sg(sg);
1110 
1111 	/* input */
1112 	sg = &cf->sg[1];
1113 
1114 	/* need to extend the input to a compound frame */
1115 	sg->extension = 1;
1116 	sg->final = 1;
1117 	sg->length = sym->cipher.data.length + ses->iv.length;
1118 	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
1119 	cpu_to_hw_sg(sg);
1120 
1121 	sg = &cf->sg[2];
1122 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1123 	sg->length = ses->iv.length;
1124 	cpu_to_hw_sg(sg);
1125 
1126 	sg++;
1127 	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
1128 	sg->length = sym->cipher.data.length;
1129 	sg->final = 1;
1130 	cpu_to_hw_sg(sg);
1131 
1132 	return cf;
1133 }
1134 
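/* Scatter-gather AEAD (GCM) case: the input chain is the IV, the optional
 * AAD (ses->auth_only_len) and the payload segments, plus the saved digest
 * when decrypting; the output chain covers the AAD and payload region of the
 * destination, plus a digest entry when encrypting.
 */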
1135 static inline struct dpaa_sec_job *
1136 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1137 {
1138 	struct rte_crypto_sym_op *sym = op->sym;
1139 	struct dpaa_sec_job *cf;
1140 	struct dpaa_sec_op_ctx *ctx;
1141 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1142 	struct rte_mbuf *mbuf;
1143 	uint8_t req_segs;
1144 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1145 			ses->iv.offset);
1146 
1147 	if (sym->m_dst) {
1148 		mbuf = sym->m_dst;
1149 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1150 	} else {
1151 		mbuf = sym->m_src;
1152 		req_segs = mbuf->nb_segs * 2 + 4;
1153 	}
1154 
1155 	if (ses->auth_only_len)
1156 		req_segs++;
1157 
1158 	if (req_segs > MAX_SG_ENTRIES) {
1159 		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1160 				MAX_SG_ENTRIES);
1161 		return NULL;
1162 	}
1163 
1164 	ctx = dpaa_sec_alloc_ctx(ses);
1165 	if (!ctx)
1166 		return NULL;
1167 
1168 	cf = &ctx->job;
1169 	ctx->op = op;
1170 
1171 	rte_prefetch0(cf->sg);
1172 
1173 	/* output */
1174 	out_sg = &cf->sg[0];
1175 	out_sg->extension = 1;
1176 	if (is_encode(ses))
1177 		out_sg->length = sym->aead.data.length + ses->auth_only_len
1178 						+ ses->digest_length;
1179 	else
1180 		out_sg->length = sym->aead.data.length + ses->auth_only_len;
1181 
1182 	/* output sg entries */
1183 	sg = &cf->sg[2];
1184 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1185 	cpu_to_hw_sg(out_sg);
1186 
1187 	/* 1st seg */
1188 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1189 	sg->length = mbuf->data_len - sym->aead.data.offset +
1190 					ses->auth_only_len;
1191 	sg->offset = sym->aead.data.offset - ses->auth_only_len;
1192 
1193 	/* Successive segs */
1194 	mbuf = mbuf->next;
1195 	while (mbuf) {
1196 		cpu_to_hw_sg(sg);
1197 		sg++;
1198 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1199 		sg->length = mbuf->data_len;
1200 		mbuf = mbuf->next;
1201 	}
1202 	sg->length -= ses->digest_length;
1203 
1204 	if (is_encode(ses)) {
1205 		cpu_to_hw_sg(sg);
1206 		/* set auth output */
1207 		sg++;
1208 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1209 		sg->length = ses->digest_length;
1210 	}
1211 	sg->final = 1;
1212 	cpu_to_hw_sg(sg);
1213 
1214 	/* input */
1215 	mbuf = sym->m_src;
1216 	in_sg = &cf->sg[1];
1217 	in_sg->extension = 1;
1218 	in_sg->final = 1;
1219 	if (is_encode(ses))
1220 		in_sg->length = ses->iv.length + sym->aead.data.length
1221 							+ ses->auth_only_len;
1222 	else
1223 		in_sg->length = ses->iv.length + sym->aead.data.length
1224 				+ ses->auth_only_len + ses->digest_length;
1225 
1226 	/* input sg entries */
1227 	sg++;
1228 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1229 	cpu_to_hw_sg(in_sg);
1230 
1231 	/* 1st seg IV */
1232 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1233 	sg->length = ses->iv.length;
1234 	cpu_to_hw_sg(sg);
1235 
1236 	/* 2nd seg auth only */
1237 	if (ses->auth_only_len) {
1238 		sg++;
1239 		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
1240 		sg->length = ses->auth_only_len;
1241 		cpu_to_hw_sg(sg);
1242 	}
1243 
1244 	/* 3rd seg */
1245 	sg++;
1246 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1247 	sg->length = mbuf->data_len - sym->aead.data.offset;
1248 	sg->offset = sym->aead.data.offset;
1249 
1250 	/* Successive segs */
1251 	mbuf = mbuf->next;
1252 	while (mbuf) {
1253 		cpu_to_hw_sg(sg);
1254 		sg++;
1255 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1256 		sg->length = mbuf->data_len;
1257 		mbuf = mbuf->next;
1258 	}
1259 
1260 	if (is_decode(ses)) {
1261 		cpu_to_hw_sg(sg);
1262 		sg++;
1263 		memcpy(ctx->digest, sym->aead.digest.data,
1264 			ses->digest_length);
1265 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1266 		sg->length = ses->digest_length;
1267 	}
1268 	sg->final = 1;
1269 	cpu_to_hw_sg(sg);
1270 
1271 	return cf;
1272 }
1273 
1274 static inline struct dpaa_sec_job *
1275 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1276 {
1277 	struct rte_crypto_sym_op *sym = op->sym;
1278 	struct dpaa_sec_job *cf;
1279 	struct dpaa_sec_op_ctx *ctx;
1280 	struct qm_sg_entry *sg;
1281 	uint32_t length = 0;
1282 	rte_iova_t src_start_addr, dst_start_addr;
1283 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1284 			ses->iv.offset);
1285 
1286 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1287 
1288 	if (sym->m_dst)
1289 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1290 	else
1291 		dst_start_addr = src_start_addr;
1292 
1293 	ctx = dpaa_sec_alloc_ctx(ses);
1294 	if (!ctx)
1295 		return NULL;
1296 
1297 	cf = &ctx->job;
1298 	ctx->op = op;
1299 
1300 	/* input */
1301 	rte_prefetch0(cf->sg);
1302 	sg = &cf->sg[2];
1303 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1304 	if (is_encode(ses)) {
1305 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1306 		sg->length = ses->iv.length;
1307 		length += sg->length;
1308 		cpu_to_hw_sg(sg);
1309 
1310 		sg++;
1311 		if (ses->auth_only_len) {
1312 			qm_sg_entry_set64(sg,
1313 					  dpaa_mem_vtop(sym->aead.aad.data));
1314 			sg->length = ses->auth_only_len;
1315 			length += sg->length;
1316 			cpu_to_hw_sg(sg);
1317 			sg++;
1318 		}
1319 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1320 		sg->length = sym->aead.data.length;
1321 		length += sg->length;
1322 		sg->final = 1;
1323 		cpu_to_hw_sg(sg);
1324 	} else {
1325 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1326 		sg->length = ses->iv.length;
1327 		length += sg->length;
1328 		cpu_to_hw_sg(sg);
1329 
1330 		sg++;
1331 		if (ses->auth_only_len) {
1332 			qm_sg_entry_set64(sg,
1333 					  dpaa_mem_vtop(sym->aead.aad.data));
1334 			sg->length = ses->auth_only_len;
1335 			length += sg->length;
1336 			cpu_to_hw_sg(sg);
1337 			sg++;
1338 		}
1339 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1340 		sg->length = sym->aead.data.length;
1341 		length += sg->length;
1342 		cpu_to_hw_sg(sg);
1343 
1344 		memcpy(ctx->digest, sym->aead.digest.data,
1345 		       ses->digest_length);
1346 		sg++;
1347 
1348 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1349 		sg->length = ses->digest_length;
1350 		length += sg->length;
1351 		sg->final = 1;
1352 		cpu_to_hw_sg(sg);
1353 	}
1354 	/* input compound frame */
1355 	cf->sg[1].length = length;
1356 	cf->sg[1].extension = 1;
1357 	cf->sg[1].final = 1;
1358 	cpu_to_hw_sg(&cf->sg[1]);
1359 
1360 	/* output */
1361 	sg++;
1362 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1363 	qm_sg_entry_set64(sg,
1364 		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
1365 	sg->length = sym->aead.data.length + ses->auth_only_len;
1366 	length = sg->length;
1367 	if (is_encode(ses)) {
1368 		cpu_to_hw_sg(sg);
1369 		/* set auth output */
1370 		sg++;
1371 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1372 		sg->length = ses->digest_length;
1373 		length += sg->length;
1374 	}
1375 	sg->final = 1;
1376 	cpu_to_hw_sg(sg);
1377 
1378 	/* output compound frame */
1379 	cf->sg[0].length = length;
1380 	cf->sg[0].extension = 1;
1381 	cpu_to_hw_sg(&cf->sg[0]);
1382 
1383 	return cf;
1384 }
1385 
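/* Scatter-gather cipher+auth (chained) case: the input chain is the IV plus
 * the authenticated region (and the saved digest when verifying); the output
 * chain is the authenticated region, with the digest appended when
 * generating.
 */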
1386 static inline struct dpaa_sec_job *
1387 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1388 {
1389 	struct rte_crypto_sym_op *sym = op->sym;
1390 	struct dpaa_sec_job *cf;
1391 	struct dpaa_sec_op_ctx *ctx;
1392 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1393 	struct rte_mbuf *mbuf;
1394 	uint8_t req_segs;
1395 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1396 			ses->iv.offset);
1397 
1398 	if (sym->m_dst) {
1399 		mbuf = sym->m_dst;
1400 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1401 	} else {
1402 		mbuf = sym->m_src;
1403 		req_segs = mbuf->nb_segs * 2 + 4;
1404 	}
1405 
1406 	if (req_segs > MAX_SG_ENTRIES) {
1407 		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1408 				MAX_SG_ENTRIES);
1409 		return NULL;
1410 	}
1411 
1412 	ctx = dpaa_sec_alloc_ctx(ses);
1413 	if (!ctx)
1414 		return NULL;
1415 
1416 	cf = &ctx->job;
1417 	ctx->op = op;
1418 
1419 	rte_prefetch0(cf->sg);
1420 
1421 	/* output */
1422 	out_sg = &cf->sg[0];
1423 	out_sg->extension = 1;
1424 	if (is_encode(ses))
1425 		out_sg->length = sym->auth.data.length + ses->digest_length;
1426 	else
1427 		out_sg->length = sym->auth.data.length;
1428 
1429 	/* output sg entries */
1430 	sg = &cf->sg[2];
1431 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1432 	cpu_to_hw_sg(out_sg);
1433 
1434 	/* 1st seg */
1435 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1436 	sg->length = mbuf->data_len - sym->auth.data.offset;
1437 	sg->offset = sym->auth.data.offset;
1438 
1439 	/* Successive segs */
1440 	mbuf = mbuf->next;
1441 	while (mbuf) {
1442 		cpu_to_hw_sg(sg);
1443 		sg++;
1444 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1445 		sg->length = mbuf->data_len;
1446 		mbuf = mbuf->next;
1447 	}
1448 	sg->length -= ses->digest_length;
1449 
1450 	if (is_encode(ses)) {
1451 		cpu_to_hw_sg(sg);
1452 		/* set auth output */
1453 		sg++;
1454 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1455 		sg->length = ses->digest_length;
1456 	}
1457 	sg->final = 1;
1458 	cpu_to_hw_sg(sg);
1459 
1460 	/* input */
1461 	mbuf = sym->m_src;
1462 	in_sg = &cf->sg[1];
1463 	in_sg->extension = 1;
1464 	in_sg->final = 1;
1465 	if (is_encode(ses))
1466 		in_sg->length = ses->iv.length + sym->auth.data.length;
1467 	else
1468 		in_sg->length = ses->iv.length + sym->auth.data.length
1469 						+ ses->digest_length;
1470 
1471 	/* input sg entries */
1472 	sg++;
1473 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1474 	cpu_to_hw_sg(in_sg);
1475 
1476 	/* 1st seg IV */
1477 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1478 	sg->length = ses->iv.length;
1479 	cpu_to_hw_sg(sg);
1480 
1481 	/* 2nd seg */
1482 	sg++;
1483 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1484 	sg->length = mbuf->data_len - sym->auth.data.offset;
1485 	sg->offset = sym->auth.data.offset;
1486 
1487 	/* Successive segs */
1488 	mbuf = mbuf->next;
1489 	while (mbuf) {
1490 		cpu_to_hw_sg(sg);
1491 		sg++;
1492 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1493 		sg->length = mbuf->data_len;
1494 		mbuf = mbuf->next;
1495 	}
1496 
1497 	sg->length -= ses->digest_length;
1498 	if (is_decode(ses)) {
1499 		cpu_to_hw_sg(sg);
1500 		sg++;
1501 		memcpy(ctx->digest, sym->auth.digest.data,
1502 			ses->digest_length);
1503 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1504 		sg->length = ses->digest_length;
1505 	}
1506 	sg->final = 1;
1507 	cpu_to_hw_sg(sg);
1508 
1509 	return cf;
1510 }
1511 
1512 static inline struct dpaa_sec_job *
1513 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1514 {
1515 	struct rte_crypto_sym_op *sym = op->sym;
1516 	struct dpaa_sec_job *cf;
1517 	struct dpaa_sec_op_ctx *ctx;
1518 	struct qm_sg_entry *sg;
1519 	rte_iova_t src_start_addr, dst_start_addr;
1520 	uint32_t length = 0;
1521 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1522 			ses->iv.offset);
1523 
1524 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1525 	if (sym->m_dst)
1526 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1527 	else
1528 		dst_start_addr = src_start_addr;
1529 
1530 	ctx = dpaa_sec_alloc_ctx(ses);
1531 	if (!ctx)
1532 		return NULL;
1533 
1534 	cf = &ctx->job;
1535 	ctx->op = op;
1536 
1537 	/* input */
1538 	rte_prefetch0(cf->sg);
1539 	sg = &cf->sg[2];
1540 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1541 	if (is_encode(ses)) {
1542 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1543 		sg->length = ses->iv.length;
1544 		length += sg->length;
1545 		cpu_to_hw_sg(sg);
1546 
1547 		sg++;
1548 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1549 		sg->length = sym->auth.data.length;
1550 		length += sg->length;
1551 		sg->final = 1;
1552 		cpu_to_hw_sg(sg);
1553 	} else {
1554 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1555 		sg->length = ses->iv.length;
1556 		length += sg->length;
1557 		cpu_to_hw_sg(sg);
1558 
1559 		sg++;
1560 
1561 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1562 		sg->length = sym->auth.data.length;
1563 		length += sg->length;
1564 		cpu_to_hw_sg(sg);
1565 
1566 		memcpy(ctx->digest, sym->auth.digest.data,
1567 		       ses->digest_length);
1568 		sg++;
1569 
1570 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1571 		sg->length = ses->digest_length;
1572 		length += sg->length;
1573 		sg->final = 1;
1574 		cpu_to_hw_sg(sg);
1575 	}
1576 	/* input compound frame */
1577 	cf->sg[1].length = length;
1578 	cf->sg[1].extension = 1;
1579 	cf->sg[1].final = 1;
1580 	cpu_to_hw_sg(&cf->sg[1]);
1581 
1582 	/* output */
1583 	sg++;
1584 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1585 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1586 	sg->length = sym->cipher.data.length;
1587 	length = sg->length;
1588 	if (is_encode(ses)) {
1589 		cpu_to_hw_sg(sg);
1590 		/* set auth output */
1591 		sg++;
1592 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1593 		sg->length = ses->digest_length;
1594 		length += sg->length;
1595 	}
1596 	sg->final = 1;
1597 	cpu_to_hw_sg(sg);
1598 
1599 	/* output compound frame */
1600 	cf->sg[0].length = length;
1601 	cf->sg[0].extension = 1;
1602 	cpu_to_hw_sg(&cf->sg[0]);
1603 
1604 	return cf;
1605 }
1606 
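/* Protocol offload (IPsec/PDCP) case: the whole packet is handed to SEC as a
 * single input entry and the output entry is sized to the remaining buffer
 * space; the real output length is patched back into the mbuf from the
 * output SG entry at dequeue time (see dqrr_out_fq_cb_rx()/dpaa_sec_deq()).
 */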
1607 static inline struct dpaa_sec_job *
1608 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1609 {
1610 	struct rte_crypto_sym_op *sym = op->sym;
1611 	struct dpaa_sec_job *cf;
1612 	struct dpaa_sec_op_ctx *ctx;
1613 	struct qm_sg_entry *sg;
1614 	phys_addr_t src_start_addr, dst_start_addr;
1615 
1616 	ctx = dpaa_sec_alloc_ctx(ses);
1617 	if (!ctx)
1618 		return NULL;
1619 	cf = &ctx->job;
1620 	ctx->op = op;
1621 
1622 	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1623 
1624 	if (sym->m_dst)
1625 		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1626 	else
1627 		dst_start_addr = src_start_addr;
1628 
1629 	/* input */
1630 	sg = &cf->sg[1];
1631 	qm_sg_entry_set64(sg, src_start_addr);
1632 	sg->length = sym->m_src->pkt_len;
1633 	sg->final = 1;
1634 	cpu_to_hw_sg(sg);
1635 
1636 	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1637 	/* output */
1638 	sg = &cf->sg[0];
1639 	qm_sg_entry_set64(sg, dst_start_addr);
1640 	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1641 	cpu_to_hw_sg(sg);
1642 
1643 	return cf;
1644 }
1645 
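/*
 * Enqueue path: for each op the session is looked up (crypto or security
 * session), lazily attached to this queue pair for the current lcore, and a
 * compound frame descriptor is built by the matching build_*() helper. For
 * AEAD and chained ops the auth-only length is passed to SEC through fd->cmd
 * (DPOVRD). Frames are pushed in bursts of up to DPAA_SEC_BURST with
 * qman_enqueue_multi_fq().
 */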
1646 static uint16_t
1647 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1648 		       uint16_t nb_ops)
1649 {
1650 	/* Transmit the frames to the given device and queue pair */
1651 	uint32_t loop;
1652 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1653 	uint16_t num_tx = 0;
1654 	struct qm_fd fds[DPAA_SEC_BURST], *fd;
1655 	uint32_t frames_to_send;
1656 	struct rte_crypto_op *op;
1657 	struct dpaa_sec_job *cf;
1658 	dpaa_sec_session *ses;
1659 	uint32_t auth_only_len;
1660 	struct qman_fq *inq[DPAA_SEC_BURST];
1661 
1662 	while (nb_ops) {
1663 		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1664 				DPAA_SEC_BURST : nb_ops;
1665 		for (loop = 0; loop < frames_to_send; loop++) {
1666 			op = *(ops++);
1667 			switch (op->sess_type) {
1668 			case RTE_CRYPTO_OP_WITH_SESSION:
1669 				ses = (dpaa_sec_session *)
1670 					get_sym_session_private_data(
1671 							op->sym->session,
1672 							cryptodev_driver_id);
1673 				break;
1674 			case RTE_CRYPTO_OP_SECURITY_SESSION:
1675 				ses = (dpaa_sec_session *)
1676 					get_sec_session_private_data(
1677 							op->sym->sec_session);
1678 				break;
1679 			default:
1680 				DPAA_SEC_DP_ERR(
1681 					"sessionless crypto op not supported");
1682 				frames_to_send = loop;
1683 				nb_ops = loop;
1684 				goto send_pkts;
1685 			}
1686 			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1687 				if (dpaa_sec_attach_sess_q(qp, ses)) {
1688 					frames_to_send = loop;
1689 					nb_ops = loop;
1690 					goto send_pkts;
1691 				}
1692 			} else if (unlikely(ses->qp[rte_lcore_id() %
1693 						MAX_DPAA_CORES] != qp)) {
1694 				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1695 					" New qp = %p\n",
1696 					ses->qp[rte_lcore_id() %
1697 					MAX_DPAA_CORES], qp);
1698 				frames_to_send = loop;
1699 				nb_ops = loop;
1700 				goto send_pkts;
1701 			}
1702 
1703 			auth_only_len = op->sym->auth.data.length -
1704 						op->sym->cipher.data.length;
1705 			if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1706 				if (is_proto_ipsec(ses)) {
1707 					cf = build_proto(op, ses);
1708 				} else if (is_proto_pdcp(ses)) {
1709 					cf = build_proto(op, ses);
1710 				} else if (is_auth_only(ses)) {
1711 					cf = build_auth_only(op, ses);
1712 				} else if (is_cipher_only(ses)) {
1713 					cf = build_cipher_only(op, ses);
1714 				} else if (is_aead(ses)) {
1715 					cf = build_cipher_auth_gcm(op, ses);
1716 					auth_only_len = ses->auth_only_len;
1717 				} else if (is_auth_cipher(ses)) {
1718 					cf = build_cipher_auth(op, ses);
1719 				} else {
1720 					DPAA_SEC_DP_ERR("not supported ops");
1721 					frames_to_send = loop;
1722 					nb_ops = loop;
1723 					goto send_pkts;
1724 				}
1725 			} else {
1726 				if (is_auth_only(ses)) {
1727 					cf = build_auth_only_sg(op, ses);
1728 				} else if (is_cipher_only(ses)) {
1729 					cf = build_cipher_only_sg(op, ses);
1730 				} else if (is_aead(ses)) {
1731 					cf = build_cipher_auth_gcm_sg(op, ses);
1732 					auth_only_len = ses->auth_only_len;
1733 				} else if (is_auth_cipher(ses)) {
1734 					cf = build_cipher_auth_sg(op, ses);
1735 				} else {
1736 					DPAA_SEC_DP_ERR("not supported ops");
1737 					frames_to_send = loop;
1738 					nb_ops = loop;
1739 					goto send_pkts;
1740 				}
1741 			}
1742 			if (unlikely(!cf)) {
1743 				frames_to_send = loop;
1744 				nb_ops = loop;
1745 				goto send_pkts;
1746 			}
1747 
1748 			fd = &fds[loop];
1749 			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1750 			fd->opaque_addr = 0;
1751 			fd->cmd = 0;
1752 			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
1753 			fd->_format1 = qm_fd_compound;
1754 			fd->length29 = 2 * sizeof(struct qm_sg_entry);
1755 			/* Auth_only_len is set as 0 in descriptor and it is
1756 			 * overwritten here in the fd.cmd which will update
1757 			 * the DPOVRD reg.
1758 			 */
1759 			if (auth_only_len)
1760 				fd->cmd = 0x80000000 | auth_only_len;
1761 
1762 		}
1763 send_pkts:
1764 		loop = 0;
1765 		while (loop < frames_to_send) {
1766 			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1767 					frames_to_send - loop);
1768 		}
1769 		nb_ops -= frames_to_send;
1770 		num_tx += frames_to_send;
1771 	}
1772 
1773 	dpaa_qp->tx_pkts += num_tx;
1774 	dpaa_qp->tx_errs += nb_ops - num_tx;
1775 
1776 	return num_tx;
1777 }
1778 
1779 static uint16_t
1780 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1781 		       uint16_t nb_ops)
1782 {
1783 	uint16_t num_rx;
1784 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1785 
1786 	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1787 
1788 	dpaa_qp->rx_pkts += num_rx;
1789 	dpaa_qp->rx_errs += nb_ops - num_rx;
1790 
1791 	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1792 
1793 	return num_rx;
1794 }
1795 
1796 /** Release queue pair */
1797 static int
1798 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1799 			    uint16_t qp_id)
1800 {
1801 	struct dpaa_sec_dev_private *internals;
1802 	struct dpaa_sec_qp *qp = NULL;
1803 
1804 	PMD_INIT_FUNC_TRACE();
1805 
1806 	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1807 
1808 	internals = dev->data->dev_private;
1809 	if (qp_id >= internals->max_nb_queue_pairs) {
1810 		DPAA_SEC_ERR("Max supported qpid %d",
1811 			     internals->max_nb_queue_pairs);
1812 		return -EINVAL;
1813 	}
1814 
1815 	qp = &internals->qps[qp_id];
1816 	qp->internals = NULL;
1817 	dev->data->queue_pairs[qp_id] = NULL;
1818 
1819 	return 0;
1820 }
1821 
1822 /** Setup a queue pair */
1823 static int
1824 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1825 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1826 		__rte_unused int socket_id)
1827 {
1828 	struct dpaa_sec_dev_private *internals;
1829 	struct dpaa_sec_qp *qp = NULL;
1830 
1831 	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1832 
1833 	internals = dev->data->dev_private;
1834 	if (qp_id >= internals->max_nb_queue_pairs) {
1835 		DPAA_SEC_ERR("Max supported qpid %d",
1836 			     internals->max_nb_queue_pairs);
1837 		return -EINVAL;
1838 	}
1839 
1840 	qp = &internals->qps[qp_id];
1841 	qp->internals = internals;
1842 	dev->data->queue_pairs[qp_id] = qp;
1843 
1844 	return 0;
1845 }
1846 
1847 /** Return the number of allocated queue pairs */
1848 static uint32_t
1849 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1850 {
1851 	PMD_INIT_FUNC_TRACE();
1852 
1853 	return dev->data->nb_queue_pairs;
1854 }
1855 
1856 /** Returns the size of session structure */
1857 static unsigned int
1858 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1859 {
1860 	PMD_INIT_FUNC_TRACE();
1861 
1862 	return sizeof(dpaa_sec_session);
1863 }
1864 
1865 static int
1866 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1867 		     struct rte_crypto_sym_xform *xform,
1868 		     dpaa_sec_session *session)
1869 {
1870 	session->cipher_alg = xform->cipher.algo;
1871 	session->iv.length = xform->cipher.iv.length;
1872 	session->iv.offset = xform->cipher.iv.offset;
1873 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1874 					       RTE_CACHE_LINE_SIZE);
1875 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1876 		DPAA_SEC_ERR("No Memory for cipher key");
1877 		return -ENOMEM;
1878 	}
1879 	session->cipher_key.length = xform->cipher.key.length;
1880 
1881 	memcpy(session->cipher_key.data, xform->cipher.key.data,
1882 	       xform->cipher.key.length);
1883 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1884 			DIR_ENC : DIR_DEC;
1885 
1886 	return 0;
1887 }
1888 
1889 static int
1890 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1891 		   struct rte_crypto_sym_xform *xform,
1892 		   dpaa_sec_session *session)
1893 {
1894 	session->auth_alg = xform->auth.algo;
1895 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1896 					     RTE_CACHE_LINE_SIZE);
1897 	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1898 		DPAA_SEC_ERR("No Memory for auth key");
1899 		return -ENOMEM;
1900 	}
1901 	session->auth_key.length = xform->auth.key.length;
1902 	session->digest_length = xform->auth.digest_length;
1903 
1904 	memcpy(session->auth_key.data, xform->auth.key.data,
1905 	       xform->auth.key.length);
1906 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1907 			DIR_ENC : DIR_DEC;
1908 
1909 	return 0;
1910 }
1911 
1912 static int
1913 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1914 		   struct rte_crypto_sym_xform *xform,
1915 		   dpaa_sec_session *session)
1916 {
1917 	session->aead_alg = xform->aead.algo;
1918 	session->iv.length = xform->aead.iv.length;
1919 	session->iv.offset = xform->aead.iv.offset;
1920 	session->auth_only_len = xform->aead.aad_length;
1921 	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1922 					     RTE_CACHE_LINE_SIZE);
1923 	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1924 		DPAA_SEC_ERR("No Memory for aead key\n");
1925 		return -ENOMEM;
1926 	}
1927 	session->aead_key.length = xform->aead.key.length;
1928 	session->digest_length = xform->aead.digest_length;
1929 
1930 	memcpy(session->aead_key.data, xform->aead.key.data,
1931 	       xform->aead.key.length);
1932 	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1933 			DIR_ENC : DIR_DEC;
1934 
1935 	return 0;
1936 }
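
/*
 * Illustrative sketch, not part of the driver: the kind of cipher-then-auth
 * xform chain that dpaa_sec_set_session_parameters() below accepts for the
 * encrypt direction and that the init helpers above consume.  Key and IV
 * sizes, key buffers and the IV offset are hypothetical placeholders.
 */
#if 0
static void
example_build_cipher_auth_chain(struct rte_crypto_sym_xform *cipher_xform,
				struct rte_crypto_sym_xform *auth_xform,
				uint8_t *cipher_key, uint8_t *auth_key)
{
	/* AES-128-CBC encryption ... */
	memset(cipher_xform, 0, sizeof(*cipher_xform));
	cipher_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher_xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher_xform->cipher.key.data = cipher_key;
	cipher_xform->cipher.key.length = 16;
	cipher_xform->cipher.iv.offset = sizeof(struct rte_crypto_op) +
					 sizeof(struct rte_crypto_sym_op);
	cipher_xform->cipher.iv.length = 16;

	/* ... chained with HMAC-SHA1 digest generation */
	memset(auth_xform, 0, sizeof(*auth_xform));
	auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth_xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth_xform->auth.key.data = auth_key;
	auth_xform->auth.key.length = 64;
	auth_xform->auth.digest_length = 20;
	auth_xform->next = NULL;

	cipher_xform->next = auth_xform;
}
#endif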
1937 
1938 static struct qman_fq *
1939 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
1940 {
1941 	unsigned int i;
1942 
1943 	for (i = 0; i < qi->max_nb_sessions; i++) {
1944 		if (qi->inq_attach[i] == 0) {
1945 			qi->inq_attach[i] = 1;
1946 			return &qi->inq[i];
1947 		}
1948 	}
1949 	DPAA_SEC_WARN("All %u sessions in use", qi->max_nb_sessions);
1950 
1951 	return NULL;
1952 }
1953 
1954 static int
1955 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
1956 {
1957 	unsigned int i;
1958 
1959 	for (i = 0; i < qi->max_nb_sessions; i++) {
1960 		if (&qi->inq[i] == fq) {
1961 			qman_retire_fq(fq, NULL);
1962 			qman_oos_fq(fq);
1963 			qi->inq_attach[i] = 0;
1964 			return 0;
1965 		}
1966 	}
1967 	return -1;
1968 }
1969 
1970 static int
1971 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
1972 {
1973 	int ret;
1974 
1975 	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
1976 	ret = dpaa_sec_prep_cdb(sess);
1977 	if (ret) {
1978 		DPAA_SEC_ERR("Unable to prepare sec cdb");
1979 		return -1;
1980 	}
1981 	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
1982 		ret = rte_dpaa_portal_init((void *)0);
1983 		if (ret) {
1984 			DPAA_SEC_ERR("Failure in affining portal");
1985 			return ret;
1986 		}
1987 	}
1988 	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
1989 			       dpaa_mem_vtop(&sess->cdb),
1990 			       qman_fq_fqid(&qp->outq));
1991 	if (ret)
1992 		DPAA_SEC_ERR("Unable to init sec queue");
1993 
1994 	return ret;
1995 }
1996 
1997 static int
1998 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
1999 			    struct rte_crypto_sym_xform *xform,	void *sess)
2000 {
2001 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2002 	dpaa_sec_session *session = sess;
2003 	uint32_t i;
2004 
2005 	PMD_INIT_FUNC_TRACE();
2006 
2007 	if (unlikely(sess == NULL)) {
2008 		DPAA_SEC_ERR("invalid session struct");
2009 		return -EINVAL;
2010 	}
2011 	memset(session, 0, sizeof(dpaa_sec_session));
2012 
2013 	/* Default IV length = 0 */
2014 	session->iv.length = 0;
2015 
2016 	/* Cipher Only */
2017 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2018 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2019 		dpaa_sec_cipher_init(dev, xform, session);
2020 
2021 	/* Authentication Only */
2022 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2023 		   xform->next == NULL) {
2024 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2025 		dpaa_sec_auth_init(dev, xform, session);
2026 
2027 	/* Cipher then Authenticate */
2028 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2029 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2030 		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2031 			dpaa_sec_cipher_init(dev, xform, session);
2032 			dpaa_sec_auth_init(dev, xform->next, session);
2033 		} else {
2034 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
2035 			return -EINVAL;
2036 		}
2037 
2038 	/* Authenticate then Cipher */
2039 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2040 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2041 		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2042 			dpaa_sec_auth_init(dev, xform, session);
2043 			dpaa_sec_cipher_init(dev, xform->next, session);
2044 		} else {
2045 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
2046 			return -EINVAL;
2047 		}
2048 
2049 	/* AEAD operation for AES-GCM kind of Algorithms */
2050 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2051 		   xform->next == NULL) {
2052 		dpaa_sec_aead_init(dev, xform, session);
2053 
2054 	} else {
2055 		DPAA_SEC_ERR("Invalid crypto type");
2056 		return -EINVAL;
2057 	}
2058 	session->ctx_pool = internals->ctx_pool;
2059 	rte_spinlock_lock(&internals->lock);
2060 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2061 		session->inq[i] = dpaa_sec_attach_rxq(internals);
2062 		if (session->inq[i] == NULL) {
2063 			DPAA_SEC_ERR("unable to attach sec queue");
2064 			rte_spinlock_unlock(&internals->lock);
2065 			goto err1;
2066 		}
2067 	}
2068 	rte_spinlock_unlock(&internals->lock);
2069 
2070 	return 0;
2071 
2072 err1:
2073 	rte_free(session->cipher_key.data);
2074 	rte_free(session->auth_key.data);
2075 	memset(session, 0, sizeof(dpaa_sec_session));
2076 
2077 	return -EINVAL;
2078 }
2079 
2080 static int
2081 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2082 		struct rte_crypto_sym_xform *xform,
2083 		struct rte_cryptodev_sym_session *sess,
2084 		struct rte_mempool *mempool)
2085 {
2086 	void *sess_private_data;
2087 	int ret;
2088 
2089 	PMD_INIT_FUNC_TRACE();
2090 
2091 	if (rte_mempool_get(mempool, &sess_private_data)) {
2092 		DPAA_SEC_ERR("Couldn't get object from session mempool");
2093 		return -ENOMEM;
2094 	}
2095 
2096 	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2097 	if (ret != 0) {
2098 		DPAA_SEC_ERR("failed to configure session parameters");
2099 
2100 		/* Return session to mempool */
2101 		rte_mempool_put(mempool, sess_private_data);
2102 		return ret;
2103 	}
2104 
2105 	set_sym_session_private_data(sess, dev->driver_id,
2106 			sess_private_data);
2107 
2108 
2109 	return 0;
2110 }
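
/*
 * Illustrative sketch, not part of the driver: how an application reaches
 * dpaa_sec_sym_session_configure() above in this DPDK generation.  The
 * "xforms" chain is e.g. the one sketched earlier; session mempool handling
 * differs between DPDK releases, so treat the calls below as an assumption.
 */
#if 0
static struct rte_cryptodev_sym_session *
example_create_sym_session(uint8_t dev_id, struct rte_mempool *sess_mp,
			   struct rte_crypto_sym_xform *xforms)
{
	struct rte_cryptodev_sym_session *sess;

	/* Allocate the generic session container from the mempool */
	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;

	/* Invokes this PMD's sym_session_configure op for dev_id */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xforms, sess_mp)) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}
#endif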
2111 
2112 /** Clear the memory of session so it doesn't leave key material behind */
2113 static void
2114 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2115 		struct rte_cryptodev_sym_session *sess)
2116 {
2117 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2118 	uint8_t index = dev->driver_id;
2119 	void *sess_priv = get_sym_session_private_data(sess, index);
2120 
2121 	PMD_INIT_FUNC_TRACE();
2122 
2123 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2124 
2125 	if (sess_priv) {
2126 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2127 
2128 		if (s->inq[rte_lcore_id() % MAX_DPAA_CORES])
2129 			dpaa_sec_detach_rxq(qi,
2130 				s->inq[rte_lcore_id() % MAX_DPAA_CORES]);
2131 		rte_free(s->cipher_key.data);
2132 		rte_free(s->auth_key.data);
2133 		memset(s, 0, sizeof(dpaa_sec_session));
2134 		set_sym_session_private_data(sess, index, NULL);
2135 		rte_mempool_put(sess_mp, sess_priv);
2136 	}
2137 }
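
/*
 * Illustrative sketch, not part of the driver: the matching application
 * tear-down, which ends up in dpaa_sec_sym_session_clear() above.  "dev_id"
 * and "sess" are assumed to be the values used in the creation sketch
 * earlier.
 */
#if 0
static void
example_destroy_sym_session(uint8_t dev_id,
			    struct rte_cryptodev_sym_session *sess)
{
	/* Zeroes and releases this device's private session data */
	rte_cryptodev_sym_session_clear(dev_id, sess);
	/* Returns the session container to its mempool */
	rte_cryptodev_sym_session_free(sess);
}
#endif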
2138 
2139 static int
2140 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2141 			   struct rte_security_session_conf *conf,
2142 			   void *sess)
2143 {
2144 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2145 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2146 	struct rte_crypto_auth_xform *auth_xform = NULL;
2147 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2148 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
2149 	uint32_t i;
2150 
2151 	PMD_INIT_FUNC_TRACE();
2152 
2153 	memset(session, 0, sizeof(dpaa_sec_session));
2154 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2155 		cipher_xform = &conf->crypto_xform->cipher;
2156 		if (conf->crypto_xform->next)
2157 			auth_xform = &conf->crypto_xform->next->auth;
2158 	} else {
2159 		auth_xform = &conf->crypto_xform->auth;
2160 		if (conf->crypto_xform->next)
2161 			cipher_xform = &conf->crypto_xform->next->cipher;
2162 	}
2163 	session->proto_alg = conf->protocol;
2164 
2165 	if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
2166 		session->cipher_key.data = rte_zmalloc(NULL,
2167 						       cipher_xform->key.length,
2168 						       RTE_CACHE_LINE_SIZE);
2169 		if (session->cipher_key.data == NULL &&
2170 				cipher_xform->key.length > 0) {
2171 			DPAA_SEC_ERR("No Memory for cipher key");
2172 			return -ENOMEM;
2173 		}
2174 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2175 				cipher_xform->key.length);
2176 		session->cipher_key.length = cipher_xform->key.length;
2177 
2178 		switch (cipher_xform->algo) {
2179 		case RTE_CRYPTO_CIPHER_AES_CBC:
2180 		case RTE_CRYPTO_CIPHER_3DES_CBC:
2181 		case RTE_CRYPTO_CIPHER_AES_CTR:
2182 			break;
2183 		default:
2184 			DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2185 				cipher_xform->algo);
2186 			goto out;
2187 		}
2188 		session->cipher_alg = cipher_xform->algo;
2189 	} else {
2190 		session->cipher_key.data = NULL;
2191 		session->cipher_key.length = 0;
2192 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2193 	}
2194 
2195 	if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
2196 		session->auth_key.data = rte_zmalloc(NULL,
2197 						auth_xform->key.length,
2198 						RTE_CACHE_LINE_SIZE);
2199 		if (session->auth_key.data == NULL &&
2200 				auth_xform->key.length > 0) {
2201 			DPAA_SEC_ERR("No Memory for auth key");
2202 			rte_free(session->cipher_key.data);
2203 			return -ENOMEM;
2204 		}
2205 		memcpy(session->auth_key.data, auth_xform->key.data,
2206 				auth_xform->key.length);
2207 		session->auth_key.length = auth_xform->key.length;
2208 
2209 		switch (auth_xform->algo) {
2210 		case RTE_CRYPTO_AUTH_SHA1_HMAC:
2211 		case RTE_CRYPTO_AUTH_MD5_HMAC:
2212 		case RTE_CRYPTO_AUTH_SHA256_HMAC:
2213 		case RTE_CRYPTO_AUTH_SHA384_HMAC:
2214 		case RTE_CRYPTO_AUTH_SHA512_HMAC:
2215 		case RTE_CRYPTO_AUTH_AES_CMAC:
2216 			break;
2217 		default:
2218 			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2219 				auth_xform->algo);
2220 			goto out;
2221 		}
2222 		session->auth_alg = auth_xform->algo;
2223 	} else {
2224 		session->auth_key.data = NULL;
2225 		session->auth_key.length = 0;
2226 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2227 	}
2228 
2229 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2230 		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
2231 				sizeof(session->ip4_hdr));
2232 		session->ip4_hdr.ip_v = IPVERSION;
2233 		session->ip4_hdr.ip_hl = 5;
2234 		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2235 						sizeof(session->ip4_hdr));
2236 		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2237 		session->ip4_hdr.ip_id = 0;
2238 		session->ip4_hdr.ip_off = 0;
2239 		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2240 		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2241 				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
2242 				: IPPROTO_AH;
2243 		session->ip4_hdr.ip_sum = 0;
2244 		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2245 		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2246 		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2247 						(void *)&session->ip4_hdr,
2248 						sizeof(struct ip));
2249 
2250 		session->encap_pdb.options =
2251 			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2252 			PDBOPTS_ESP_OIHI_PDB_INL |
2253 			PDBOPTS_ESP_IVSRC |
2254 			PDBHMO_ESP_ENCAP_DTTL |
2255 			PDBHMO_ESP_SNR;
2256 		session->encap_pdb.spi = ipsec_xform->spi;
2257 		session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2258 
2259 		session->dir = DIR_ENC;
2260 	} else if (ipsec_xform->direction ==
2261 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2262 		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2263 		session->decap_pdb.options = sizeof(struct ip) << 16;
2264 		session->dir = DIR_DEC;
2265 	} else
2266 		goto out;
2267 	session->ctx_pool = internals->ctx_pool;
2268 	rte_spinlock_lock(&internals->lock);
2269 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2270 		session->inq[i] = dpaa_sec_attach_rxq(internals);
2271 		if (session->inq[i] == NULL) {
2272 			DPAA_SEC_ERR("unable to attach sec queue");
2273 			rte_spinlock_unlock(&internals->lock);
2274 			goto out;
2275 		}
2276 	}
2277 	rte_spinlock_unlock(&internals->lock);
2278 
2279 	return 0;
2280 out:
2281 	rte_free(session->auth_key.data);
2282 	rte_free(session->cipher_key.data);
2283 	memset(session, 0, sizeof(dpaa_sec_session));
2284 	return -1;
2285 }
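
/*
 * Illustrative sketch, not part of the driver: a minimal
 * rte_security_session_conf exercising the egress ESP tunnel path parsed
 * above.  Note the chaining order this function expects: cipher first
 * (auth via ->next) for egress, auth first for ingress.  SPI, addresses
 * and the xform chain are hypothetical placeholders.
 */
#if 0
static void
example_fill_ipsec_conf(struct rte_security_session_conf *conf,
			struct rte_crypto_sym_xform *cipher_then_auth)
{
	memset(conf, 0, sizeof(*conf));
	conf->action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
	conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
	conf->ipsec.spi = 0x1234;
	conf->ipsec.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	conf->ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	conf->ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	conf->ipsec.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
	conf->ipsec.tunnel.ipv4.src_ip.s_addr = rte_cpu_to_be_32(0x0a000001);
	conf->ipsec.tunnel.ipv4.dst_ip.s_addr = rte_cpu_to_be_32(0x0a000002);
	conf->ipsec.tunnel.ipv4.ttl = 64;
	conf->ipsec.tunnel.ipv4.dscp = 0;
	conf->crypto_xform = cipher_then_auth;	/* cipher, ->next = auth */
}
#endif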
2286 
2287 static int
2288 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2289 			  struct rte_security_session_conf *conf,
2290 			  void *sess)
2291 {
2292 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2293 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2294 	struct rte_crypto_auth_xform *auth_xform = NULL;
2295 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2296 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
2297 	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2298 	uint32_t i;
2299 
2300 	PMD_INIT_FUNC_TRACE();
2301 
2302 	memset(session, 0, sizeof(dpaa_sec_session));
2303 
2304 	/* find xfrm types */
2305 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2306 		cipher_xform = &xform->cipher;
2307 		if (xform->next != NULL)
2308 			auth_xform = &xform->next->auth;
2309 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2310 		auth_xform = &xform->auth;
2311 		if (xform->next != NULL)
2312 			cipher_xform = &xform->next->cipher;
2313 	} else {
2314 		DPAA_SEC_ERR("Invalid crypto type");
2315 		return -EINVAL;
2316 	}
2317 
2318 	session->proto_alg = conf->protocol;
2319 	if (cipher_xform) {
2320 		session->cipher_key.data = rte_zmalloc(NULL,
2321 					       cipher_xform->key.length,
2322 					       RTE_CACHE_LINE_SIZE);
2323 		if (session->cipher_key.data == NULL &&
2324 				cipher_xform->key.length > 0) {
2325 			DPAA_SEC_ERR("No Memory for cipher key");
2326 			return -ENOMEM;
2327 		}
2328 		session->cipher_key.length = cipher_xform->key.length;
2329 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2330 			cipher_xform->key.length);
2331 		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2332 					DIR_ENC : DIR_DEC;
2333 		session->cipher_alg = cipher_xform->algo;
2334 	} else {
2335 		session->cipher_key.data = NULL;
2336 		session->cipher_key.length = 0;
2337 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2338 		session->dir = DIR_ENC;
2339 	}
2340 
2341 	/* Auth is only applicable for control mode operation. */
2342 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2343 		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) {
2344 			DPAA_SEC_ERR(
2345 				"PDCP Seq Num size should be 5 bits for cmode");
2346 			goto out;
2347 		}
2348 		if (auth_xform) {
2349 			session->auth_key.data = rte_zmalloc(NULL,
2350 							auth_xform->key.length,
2351 							RTE_CACHE_LINE_SIZE);
2352 			if (session->auth_key.data == NULL &&
2353 					auth_xform->key.length > 0) {
2354 				DPAA_SEC_ERR("No Memory for auth key");
2355 				rte_free(session->cipher_key.data);
2356 				return -ENOMEM;
2357 			}
2358 			session->auth_key.length = auth_xform->key.length;
2359 			memcpy(session->auth_key.data, auth_xform->key.data,
2360 					auth_xform->key.length);
2361 			session->auth_alg = auth_xform->algo;
2362 		} else {
2363 			session->auth_key.data = NULL;
2364 			session->auth_key.length = 0;
2365 			session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2366 		}
2367 	}
2368 	session->pdcp.domain = pdcp_xform->domain;
2369 	session->pdcp.bearer = pdcp_xform->bearer;
2370 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2371 	session->pdcp.sn_size = pdcp_xform->sn_size;
2372 #ifdef ENABLE_HFN_OVERRIDE
2373 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd;
2374 #endif
2375 	session->pdcp.hfn = pdcp_xform->hfn;
2376 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2377 
2378 	session->ctx_pool = dev_priv->ctx_pool;
2379 	rte_spinlock_lock(&dev_priv->lock);
2380 	for (i = 0; i < MAX_DPAA_CORES; i++) {
2381 		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
2382 		if (session->inq[i] == NULL) {
2383 			DPAA_SEC_ERR("unable to attach sec queue");
2384 			rte_spinlock_unlock(&dev_priv->lock);
2385 			goto out;
2386 		}
2387 	}
2388 	rte_spinlock_unlock(&dev_priv->lock);
2389 	return 0;
2390 out:
2391 	rte_free(session->auth_key.data);
2392 	rte_free(session->cipher_key.data);
2393 	memset(session, 0, sizeof(dpaa_sec_session));
2394 	return -1;
2395 }
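
/*
 * Illustrative sketch, not part of the driver: a control-plane PDCP
 * session conf of the shape parsed above (control mode mandates a 5-bit
 * sequence number, as checked in the function).  Bearer, HFN values and
 * the crypto xform chain are hypothetical placeholders.
 */
#if 0
static void
example_fill_pdcp_conf(struct rte_security_session_conf *conf,
		       struct rte_crypto_sym_xform *cipher_then_auth)
{
	memset(conf, 0, sizeof(*conf));
	conf->action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
	conf->protocol = RTE_SECURITY_PROTOCOL_PDCP;
	conf->pdcp.domain = RTE_SECURITY_PDCP_MODE_CONTROL;
	conf->pdcp.bearer = 0x3;
	conf->pdcp.pkt_dir = RTE_SECURITY_PDCP_UPLINK;
	conf->pdcp.sn_size = RTE_SECURITY_PDCP_SN_SIZE_5;
	conf->pdcp.hfn = 0x1;
	conf->pdcp.hfn_threshold = 0xfffff;
	conf->crypto_xform = cipher_then_auth;	/* cipher, ->next = auth */
}
#endif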
2396 
2397 static int
2398 dpaa_sec_security_session_create(void *dev,
2399 				 struct rte_security_session_conf *conf,
2400 				 struct rte_security_session *sess,
2401 				 struct rte_mempool *mempool)
2402 {
2403 	void *sess_private_data;
2404 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2405 	int ret;
2406 
2407 	if (rte_mempool_get(mempool, &sess_private_data)) {
2408 		DPAA_SEC_ERR("Couldn't get object from session mempool");
2409 		return -ENOMEM;
2410 	}
2411 
2412 	switch (conf->protocol) {
2413 	case RTE_SECURITY_PROTOCOL_IPSEC:
2414 		ret = dpaa_sec_set_ipsec_session(cdev, conf,
2415 				sess_private_data);
2416 		break;
2417 	case RTE_SECURITY_PROTOCOL_PDCP:
2418 		ret = dpaa_sec_set_pdcp_session(cdev, conf,
2419 				sess_private_data);
2420 		break;
2421 	case RTE_SECURITY_PROTOCOL_MACSEC:
2422 		return -ENOTSUP;
2423 	default:
2424 		return -EINVAL;
2425 	}
2426 	if (ret != 0) {
2427 		DPAA_SEC_ERR("failed to configure session parameters");
2428 		/* Return session to mempool */
2429 		rte_mempool_put(mempool, sess_private_data);
2430 		return ret;
2431 	}
2432 
2433 	set_sec_session_private_data(sess, sess_private_data);
2434 
2435 	return ret;
2436 }
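
/*
 * Illustrative sketch, not part of the driver: creating a lookaside
 * protocol session through the security API, which dispatches to
 * dpaa_sec_security_session_create() above via the security context
 * registered in dpaa_sec_dev_init() below.  The conf and mempool are
 * assumed to be prepared by the caller (see the IPsec/PDCP sketches).
 */
#if 0
static struct rte_security_session *
example_create_sec_session(uint8_t dev_id,
			   struct rte_security_session_conf *conf,
			   struct rte_mempool *sess_mp)
{
	struct rte_security_ctx *ctx =
		(struct rte_security_ctx *)rte_cryptodev_get_sec_ctx(dev_id);

	if (ctx == NULL)
		return NULL;

	return rte_security_session_create(ctx, conf, sess_mp);
}
#endif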
2437 
2438 /** Clear the memory of session so it doesn't leave key material behind */
2439 static int
2440 dpaa_sec_security_session_destroy(void *dev __rte_unused,
2441 		struct rte_security_session *sess)
2442 {
2443 	PMD_INIT_FUNC_TRACE();
2444 	void *sess_priv = get_sec_session_private_data(sess);
2445 
2446 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2447 
2448 	if (sess_priv) {
2449 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2450 
2451 		rte_free(s->cipher_key.data);
2452 		rte_free(s->auth_key.data);
2453 		memset(s, 0, sizeof(dpaa_sec_session));
2454 		set_sec_session_private_data(sess, NULL);
2455 		rte_mempool_put(sess_mp, sess_priv);
2456 	}
2457 	return 0;
2458 }
2459 
2460 
2461 static int
2462 dpaa_sec_dev_configure(struct rte_cryptodev *dev,
2463 		       struct rte_cryptodev_config *config __rte_unused)
2464 {
2465 
2466 	char str[20];
2467 	struct dpaa_sec_dev_private *internals;
2468 
2469 	PMD_INIT_FUNC_TRACE();
2470 
2471 	internals = dev->data->dev_private;
2472 	snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
2473 	if (!internals->ctx_pool) {
2474 		internals->ctx_pool = rte_mempool_create((const char *)str,
2475 							CTX_POOL_NUM_BUFS,
2476 							CTX_POOL_BUF_SIZE,
2477 							CTX_POOL_CACHE_SIZE, 0,
2478 							NULL, NULL, NULL, NULL,
2479 							SOCKET_ID_ANY, 0);
2480 		if (!internals->ctx_pool) {
2481 			DPAA_SEC_ERR("%s create failed\n", str);
2482 			return -ENOMEM;
2483 		}
2484 	} else
2485 		DPAA_SEC_INFO("mempool already created for dev_id : %d",
2486 				dev->data->dev_id);
2487 
2488 	return 0;
2489 }
2490 
2491 static int
2492 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2493 {
2494 	PMD_INIT_FUNC_TRACE();
2495 	return 0;
2496 }
2497 
2498 static void
2499 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2500 {
2501 	PMD_INIT_FUNC_TRACE();
2502 }
2503 
2504 static int
2505 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2506 {
2507 	struct dpaa_sec_dev_private *internals;
2508 
2509 	PMD_INIT_FUNC_TRACE();
2510 
2511 	if (dev == NULL)
2512 		return -EINVAL;
2513 
2514 	internals = dev->data->dev_private;
2515 	rte_mempool_free(internals->ctx_pool);
2516 	internals->ctx_pool = NULL;
2517 
2518 	return 0;
2519 }
2520 
2521 static void
2522 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2523 		       struct rte_cryptodev_info *info)
2524 {
2525 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2526 
2527 	PMD_INIT_FUNC_TRACE();
2528 	if (info != NULL) {
2529 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2530 		info->feature_flags = dev->feature_flags;
2531 		info->capabilities = dpaa_sec_capabilities;
2532 		info->sym.max_nb_sessions = internals->max_nb_sessions;
2533 		info->driver_id = cryptodev_driver_id;
2534 	}
2535 }
2536 
2537 static struct rte_cryptodev_ops crypto_ops = {
2538 	.dev_configure	      = dpaa_sec_dev_configure,
2539 	.dev_start	      = dpaa_sec_dev_start,
2540 	.dev_stop	      = dpaa_sec_dev_stop,
2541 	.dev_close	      = dpaa_sec_dev_close,
2542 	.dev_infos_get        = dpaa_sec_dev_infos_get,
2543 	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
2544 	.queue_pair_release   = dpaa_sec_queue_pair_release,
2545 	.queue_pair_count     = dpaa_sec_queue_pair_count,
2546 	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
2547 	.sym_session_configure    = dpaa_sec_sym_session_configure,
2548 	.sym_session_clear        = dpaa_sec_sym_session_clear
2549 };
2550 
2551 static const struct rte_security_capability *
2552 dpaa_sec_capabilities_get(void *device __rte_unused)
2553 {
2554 	return dpaa_sec_security_cap;
2555 }
2556 
2557 static const struct rte_security_ops dpaa_sec_security_ops = {
2558 	.session_create = dpaa_sec_security_session_create,
2559 	.session_update = NULL,
2560 	.session_stats_get = NULL,
2561 	.session_destroy = dpaa_sec_security_session_destroy,
2562 	.set_pkt_metadata = NULL,
2563 	.capabilities_get = dpaa_sec_capabilities_get
2564 };
2565 
2566 static int
2567 dpaa_sec_uninit(struct rte_cryptodev *dev)
2568 {
2569 	struct dpaa_sec_dev_private *internals;
2570 
2571 	if (dev == NULL)
2572 		return -ENODEV;
2573 
2574 	internals = dev->data->dev_private;
2575 	rte_free(dev->security_ctx);
2576 
2577 	/* In case close has been called, internals->ctx_pool would be NULL */
2578 	rte_mempool_free(internals->ctx_pool);
2579 	rte_free(internals);
2580 
2581 	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
2582 		      dev->data->name, rte_socket_id());
2583 
2584 	return 0;
2585 }
2586 
2587 static int
2588 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2589 {
2590 	struct dpaa_sec_dev_private *internals;
2591 	struct rte_security_ctx *security_instance;
2592 	struct dpaa_sec_qp *qp;
2593 	uint32_t i, flags;
2594 	int ret;
2595 
2596 	PMD_INIT_FUNC_TRACE();
2597 
2598 	cryptodev->driver_id = cryptodev_driver_id;
2599 	cryptodev->dev_ops = &crypto_ops;
2600 
2601 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2602 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2603 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2604 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
2605 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2606 			RTE_CRYPTODEV_FF_SECURITY |
2607 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2608 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2609 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2610 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2611 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2612 
2613 	internals = cryptodev->data->dev_private;
2614 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2615 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2616 
2617 	/*
2618 	 * For secondary processes, we don't initialise any further as the
2619 	 * primary process has already done this work; simply reuse the
2620 	 * device that the primary has already set up.
2621 	 */
2622 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2623 		DPAA_SEC_WARN("Device already initialised by primary process");
2624 		return 0;
2625 	}
2626 
2627 	/* Initialize security_ctx only for primary process */
2628 	security_instance = rte_malloc("rte_security_instances_ops",
2629 				sizeof(struct rte_security_ctx), 0);
2630 	if (security_instance == NULL)
2631 		return -ENOMEM;
2632 	security_instance->device = (void *)cryptodev;
2633 	security_instance->ops = &dpaa_sec_security_ops;
2634 	security_instance->sess_cnt = 0;
2635 	cryptodev->security_ctx = security_instance;
2636 
2637 	rte_spinlock_init(&internals->lock);
2638 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2639 		/* init qman fq for queue pair */
2640 		qp = &internals->qps[i];
2641 		ret = dpaa_sec_init_tx(&qp->outq);
2642 		if (ret) {
2643 			DPAA_SEC_ERR("config tx failed for queue pair %d", i);
2644 			goto init_error;
2645 		}
2646 	}
2647 
2648 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2649 		QMAN_FQ_FLAG_TO_DCPORTAL;
2650 	for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
2651 		/* create rx qman fq for sessions */
2652 		ret = qman_create_fq(0, flags, &internals->inq[i]);
2653 		if (unlikely(ret != 0)) {
2654 			DPAA_SEC_ERR("sec qman_create_fq failed");
2655 			goto init_error;
2656 		}
2657 	}
2658 
2659 	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
2660 	return 0;
2661 
2662 init_error:
2663 	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
2664 
2665 	dpaa_sec_uninit(cryptodev);
2666 	return -EFAULT;
2667 }
2668 
2669 static int
2670 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
2671 				struct rte_dpaa_device *dpaa_dev)
2672 {
2673 	struct rte_cryptodev *cryptodev;
2674 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2675 
2676 	int retval;
2677 
2678 	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
2679 			dpaa_dev->id.dev_id);
2680 
2681 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2682 	if (cryptodev == NULL)
2683 		return -ENOMEM;
2684 
2685 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2686 		cryptodev->data->dev_private = rte_zmalloc_socket(
2687 					"cryptodev private structure",
2688 					sizeof(struct dpaa_sec_dev_private),
2689 					RTE_CACHE_LINE_SIZE,
2690 					rte_socket_id());
2691 
2692 		if (cryptodev->data->dev_private == NULL)
2693 			rte_panic("Cannot allocate memory for private "
2694 					"device data");
2695 	}
2696 
2697 	dpaa_dev->crypto_dev = cryptodev;
2698 	cryptodev->device = &dpaa_dev->device;
2699 
2700 	/* init user callbacks */
2701 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
2702 
2703 	/* if sec device version is not configured */
2704 	if (!rta_get_sec_era()) {
2705 		const struct device_node *caam_node;
2706 
2707 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2708 			const uint32_t *prop = of_get_property(caam_node,
2709 					"fsl,sec-era",
2710 					NULL);
2711 			if (prop) {
2712 				rta_set_sec_era(
2713 					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
2714 				break;
2715 			}
2716 		}
2717 	}
2718 
2719 	/* Invoke PMD device initialization function */
2720 	retval = dpaa_sec_dev_init(cryptodev);
2721 	if (retval == 0)
2722 		return 0;
2723 
2724 	/* In case of error, cleanup is done */
2725 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2726 		rte_free(cryptodev->data->dev_private);
2727 
2728 	rte_cryptodev_pmd_release_device(cryptodev);
2729 
2730 	return -ENXIO;
2731 }
2732 
2733 static int
2734 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2735 {
2736 	struct rte_cryptodev *cryptodev;
2737 	int ret;
2738 
2739 	cryptodev = dpaa_dev->crypto_dev;
2740 	if (cryptodev == NULL)
2741 		return -ENODEV;
2742 
2743 	ret = dpaa_sec_uninit(cryptodev);
2744 	if (ret)
2745 		return ret;
2746 
2747 	return rte_cryptodev_pmd_destroy(cryptodev);
2748 }
2749 
2750 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
2751 	.drv_type = FSL_DPAA_CRYPTO,
2752 	.driver = {
2753 		.name = "DPAA SEC PMD"
2754 	},
2755 	.probe = cryptodev_dpaa_sec_probe,
2756 	.remove = cryptodev_dpaa_sec_remove,
2757 };
2758 
2759 static struct cryptodev_driver dpaa_sec_crypto_drv;
2760 
2761 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
2762 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
2763 		cryptodev_driver_id);
2764 
2765 RTE_INIT(dpaa_sec_init_log)
2766 {
2767 	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
2768 	if (dpaa_logtype_sec >= 0)
2769 		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
2770 }
2771