/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>

enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
	struct dpaa_sec_op_ctx *ctx;
	int retval;

	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (!ctx || retval) {
		PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called four
	 * times to clear all the SG entries. As dpaa_sec_alloc_ctx() is
	 * called for each packet, memset() would be costlier than
	 * dcbz_64().
	 */
	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;
	ctx->vtop_offset = (uint64_t) ctx
				- rte_mempool_virt2iova(ctx);

	return ctx;
}

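/*
 * Translate a virtual address to an IO address with a linear walk of
 * the EAL physmem layout; returns 0 if the address is not backed by
 * any memseg.
 */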
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	uint64_t vaddr_64, paddr;
	int i;

	vaddr_64 = (uint64_t)vaddr;
	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (vaddr_64 >= memseg[i].addr_64 &&
		    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
			paddr = memseg[i].iova +
				(vaddr_64 - memseg[i].addr_64);

			return (rte_iova_t)paddr;
		}
	}
	return (rte_iova_t)(NULL);
}

/* virtual address conversion when mempool support is available for ctx */
static inline phys_addr_t
dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
{
	return (uint64_t)vaddr - ctx->vtop_offset;
}

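/*
 * Reverse translation: find the memseg that contains the IO address
 * and return the matching virtual address, or NULL if none does.
 */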
static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	int i;

	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (paddr >= memseg[i].iova &&
		    (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
			return (void *)(memseg[i].addr_64 +
					(paddr - memseg[i].iova));
	}
	return NULL;
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
		   fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with dest chan as the CAAM channel so that
 * all packets on this queue are dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern  = ern_sec_fq_handler;

	PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);

	return ret;
}

/* Frames are enqueued on the input FQ and CAAM puts the crypto result
 * on the output FQ.
 */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* The CAAM result is delivered on this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		PMD_INIT_LOG(ERR, "qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern  = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		PMD_INIT_LOG(ERR, "unable to init caam source fq!");
		return ret;
	}

	return ret;
}

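/* Helpers to classify a session by the combination of cipher, auth,
 * AEAD and security protocol settings chosen at session setup.
 */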
static inline int is_cipher_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
}

static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
	}
}

static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
	}
}

static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
	}
}

/* prepare the command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	uint32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (uint64_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, &alginfo_c,
						NULL,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (uint64_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported aead alg\n");
			return -ENOTSUP;
		}
		alginfo.key = (uint64_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (uint64_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (uint64_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (uint64_t)dpaa_mem_vtop(
							(void *)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (uint64_t)dpaa_mem_vtop(
							(void *)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		if (is_proto_ipsec(ses)) {
			if (ses->dir == DIR_ENC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
						cdb->sh_desc,
						true, swap, &ses->encap_pdb,
						(uint8_t *)&ses->ip4_hdr,
						&alginfo_c, &alginfo_a);
			} else if (ses->dir == DIR_DEC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
						cdb->sh_desc,
						true, swap, &ses->decap_pdb,
						&alginfo_c, &alginfo_a);
			}
		} else {
			/* auth_only_len is set to 0 here; it is overwritten
			 * in the FD for each packet.
			 */
			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
					true, swap, &alginfo_c, &alginfo_a,
					ses->iv.length, 0,
					ses->digest_length, ses->dir);
		}
	}
	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

/* qp is lockless; it should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int ret;
	struct qm_dqrr_entry *dq;

	fq = &qp->outq;
	ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
				DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			op->sym->m_src->pkt_len = len;
			op->sym->m_src->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			PMD_RX_LOG(ERR, "SEC return err: 0x%x",
				   ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}

/**
 * packet looks like:
 *              |<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *         mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		sg->extension = 1;
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);

		sg = &cf->sg[2];
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		cpu_to_hw_sg(sg);

		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}

	return cf;
}

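/* Build a compound frame for a cipher-only op: sg[0] points at the
 * output buffer, sg[1] extends to an in-line table carrying the IV
 * followed by the payload.
 */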
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

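/* Build a compound frame for an AEAD (AES-GCM) op. The input table
 * carries the IV, optional AAD and payload, plus the received digest
 * on decrypt so the hardware can verify it; the output table carries
 * the payload and, on encrypt, the generated digest.
 */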
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

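/* Build a compound frame for a chained cipher+auth op. The layout
 * mirrors the GCM case: IV and auth range on input (plus the received
 * digest on decrypt), cipher output and, on encrypt, the generated
 * digest on output.
 */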
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

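/* Build a simple frame for protocol (IPsec) offload: the whole mbuf
 * is handed to SEC, which performs encap/decap and places the
 * resulting packet in the destination buffer.
 */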
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}

static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Transmit frames to the given device and queue pair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	struct dpaa_sec_op_ctx *ctx;
	uint32_t auth_only_len;
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
			default:
				PMD_TX_LOG(ERR,
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}
			if (unlikely(!ses->qp || ses->qp != qp)) {
				PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
						ses->qp, qp);
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}

			/*
			 * Segmented buffers are not supported.
			 */
			if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
				op->status = RTE_CRYPTO_OP_STATUS_ERROR;
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}
			auth_only_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;

			if (is_auth_only(ses)) {
				cf = build_auth_only(op, ses);
			} else if (is_cipher_only(ses)) {
				cf = build_cipher_only(op, ses);
			} else if (is_aead(ses)) {
				cf = build_cipher_auth_gcm(op, ses);
				auth_only_len = ses->auth_only_len;
			} else if (is_auth_cipher(ses)) {
				cf = build_cipher_auth(op, ses);
			} else if (is_proto_ipsec(ses)) {
				cf = build_proto(op, ses);
			} else {
				PMD_TX_LOG(ERR, "not supported sec op");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq;
			fd->opaque_addr = 0;
			fd->cmd = 0;
			ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
			qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);
			/* auth_only_len is set to 0 in the descriptor and is
			 * overwritten here in fd.cmd, which updates the
			 * DPOVRD register.
			 */
			if (auth_only_len)
				fd->cmd = 0x80000000 | auth_only_len;
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);

	return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		     dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

/** Start queue pair */
static int
dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			 __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Return the size of the session structure */
static unsigned int
dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for auth key\n");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

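/* Pick a free pre-created ingress FQ for a session; FQs are recycled
 * via dpaa_sec_detach_rxq() when the session is cleared.
 */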
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	PMD_DRV_LOG(ERR, "All %u sessions in use", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qman_retire_fq(fq, NULL);
			qman_oos_fq(fq);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}

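/* Bind a session to a queue pair: prepare the shared descriptor and
 * schedule the session's ingress FQ towards CAAM, with results
 * delivered on the queue pair's egress FQ.
 */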
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
		return -1;
	}

	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		PMD_DRV_LOG(ERR, "Unable to init sec queue");

	return ret;
}

static int
dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
			uint16_t qp_id __rte_unused,
			void *ses __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
			uint16_t qp_id __rte_unused,
			void *ses)
{
	dpaa_sec_session *sess = ses;
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (sess->inq)
		dpaa_sec_detach_rxq(qi, sess->inq);
	sess->inq = NULL;

	sess->qp = NULL;

	return 0;
}

static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct\n");
		return -EINVAL;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		PMD_DRV_LOG(ERR, "Invalid crypto type");
		return -EINVAL;
	}
	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		PMD_DRV_LOG(ERR, "unable to attach sec queue");
		goto err1;
	}

	return 0;

err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));

	return -EINVAL;
}

static int
dpaa_sec_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
				"session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);

	PMD_INIT_FUNC_TRACE();

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		if (s->inq)
			dpaa_sec_detach_rxq(qi, s->inq);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

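/* Parse an rte_security IPsec session config into a dpaa_sec_session:
 * keys and algorithms from the crypto transforms, plus the encap or
 * decap PDB (including a prebuilt outer IPv4 header for egress).
 */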
static int
dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform;
	struct rte_crypto_cipher_xform *cipher_xform;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;

	PMD_INIT_FUNC_TRACE();

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = &conf->crypto_xform->cipher;
		auth_xform = &conf->crypto_xform->next->auth;
	} else {
		auth_xform = &conf->crypto_xform->auth;
		cipher_xform = &conf->crypto_xform->next->cipher;
	}
	session->proto_alg = conf->protocol;
	session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL &&
			cipher_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
		return -ENOMEM;
	}

	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL,
					auth_xform->key.length,
					RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL &&
			auth_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
		rte_free(session->cipher_key.data);
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
			auth_xform->key.length);

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
			auth_xform->algo);
		goto out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			auth_xform->algo);
		goto out;
	}

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
			cipher_xform->algo);
		goto out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			cipher_xform->algo);
		goto out;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_v = IPVERSION;
		session->ip4_hdr.ip_hl = 5;
		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
		session->ip4_hdr.ip_id = 0;
		session->ip4_hdr.ip_off = 0;
		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
				: IPPROTO_AH;
		session->ip4_hdr.ip_sum = 0;
		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));

		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL;
		session->encap_pdb.spi = ipsec_xform->spi;
		session->encap_pdb.ip_hdr_len = sizeof(struct ip);

		session->dir = DIR_ENC;
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		session->decap_pdb.options = sizeof(struct ip) << 16;
		session->dir = DIR_DEC;
	} else
		goto out;
	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		PMD_DRV_LOG(ERR, "unable to attach sec queue");
		goto out;
	}

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}

static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret != 0) {
		PMD_DRV_LOG(ERR,
			"DPAA PMD: failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev __rte_unused,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		set_sec_session_private_data(sess, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
	return 0;
}

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->sym.max_nb_sessions_per_qp =
			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
			RTE_DPAA_MAX_NB_SEC_QPS;
		info->driver_id = cryptodev_driver_id;
	}
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure	      = dpaa_sec_dev_configure,
	.dev_start	      = dpaa_sec_dev_start,
	.dev_stop	      = dpaa_sec_dev_stop,
	.dev_close	      = dpaa_sec_dev_close,
	.dev_infos_get        = dpaa_sec_dev_infos_get,
	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
	.queue_pair_release   = dpaa_sec_queue_pair_release,
	.queue_pair_start     = dpaa_sec_queue_pair_start,
	.queue_pair_stop      = dpaa_sec_queue_pair_stop,
	.queue_pair_count     = dpaa_sec_queue_pair_count,
	.session_get_size     = dpaa_sec_session_get_size,
	.session_configure    = dpaa_sec_session_configure,
	.session_clear        = dpaa_sec_session_clear,
	.qp_attach_session    = dpaa_sec_qp_attach_sess,
	.qp_detach_session    = dpaa_sec_qp_detach_sess,
};

static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	rte_mempool_free(internals->ctx_pool);
	rte_free(internals);

	PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}

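/* Per-device initialization: register ops and feature flags, create
 * the egress FQ for each queue pair, pre-create the per-session
 * ingress FQs and allocate the op context pool.
 */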
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;
	char str[20];

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as the
	 * primary has already done this work. Only check that we don't need
	 * a different RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for the primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
			goto init_error;
		}
	}

	snprintf(str, sizeof(str), "ctx_pool_%d", cryptodev->data->dev_id);
	internals->ctx_pool = rte_mempool_create((const char *)str,
			CTX_POOL_NUM_BUFS,
			CTX_POOL_BUF_SIZE,
			CTX_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->ctx_pool) {
		RTE_LOG(ERR, PMD, "%s create failed\n", str);
		goto init_error;
	}

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}

static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
				struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
		 dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;
	cryptodev->device->driver = &dpaa_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if the SEC device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
		cryptodev_driver_id);
2001