xref: /dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c (revision cdea34452b09f5ce3c2f3ada6182afa0071fce47)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017 NXP.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of NXP nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>

enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

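/*
 * Report the SEC hardware status of a completed job to the crypto op,
 * then return the op context to its mempool.
 */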
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

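/*
 * Allocate a per-packet op context (with its embedded SG table) from the
 * session's context pool; returns NULL if the pool is exhausted.
 */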
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
	struct dpaa_sec_op_ctx *ctx;
	int retval;

	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (!ctx || retval) {
		PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, and memset() is costlier than dcbz_64().
	 */
	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;

	return ctx;
}

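/*
 * Translate a virtual address to a physical address by walking the EAL
 * memseg layout; returns 0 if the address falls in no segment.
 */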
static inline phys_addr_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	uint64_t vaddr_64, paddr;
	int i;

	vaddr_64 = (uint64_t)vaddr;
	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (vaddr_64 >= memseg[i].addr_64 &&
		    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
			paddr = memseg[i].phys_addr +
				(vaddr_64 - memseg[i].addr_64);

			return (phys_addr_t)paddr;
		}
	}
	return (phys_addr_t)(NULL);
}

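/*
 * Inverse of dpaa_mem_vtop(): map a physical address back to the process
 * virtual address, or NULL if no memseg contains it.
 */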
static inline void *
dpaa_mem_ptov(phys_addr_t paddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	int i;

	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (paddr >= memseg[i].phys_addr &&
		    (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len)
			return (void *)(memseg[i].addr_64 +
					(paddr - memseg[i].phys_addr));
	}
	return NULL;
}

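/* Enqueue rejection (ERN) callback: log the frame queue id, rejection code
 * and sequence number of the rejected frame.
 */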
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
		   fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with dest chan as caam chan so that
 * all packets in this queue can be dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, phys_addr_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;

	ret = qman_create_fq(0, flags, fq_in);
	if (unlikely(ret != 0)) {
		PMD_INIT_LOG(ERR, "qman_create_fq failed");
		return ret;
	}

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern  = ern_sec_fq_handler;

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		PMD_INIT_LOG(ERR, "qman_init_fq failed");

	return ret;
}

/* Frames are enqueued to in_fq and CAAM puts the crypto result into out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* The SG table is embedded in an op ctx:
	 * sg[0] is for output,
	 * sg[1] is for input.
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* The CAAM result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		PMD_INIT_LOG(ERR, "qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern  = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		PMD_INIT_LOG(ERR, "unable to init caam source fq!");
		return ret;
	}

	return ret;
}

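/* Session-type predicates: classify a session by which of the cipher, auth
 * and AEAD algorithms are configured, and by its direction.
 */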
static inline int is_cipher_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

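/* Translate the session auth algorithm into the CAAM algorithm selector
 * and AAI mode (HMAC for all algorithms supported here).
 */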
static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
	}
}

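/* Translate the session cipher algorithm into the CAAM algorithm selector
 * and AAI mode (CBC or CTR).
 */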
static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
	}
}

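/* Translate the session AEAD algorithm (currently AES-GCM only) into the
 * CAAM algorithm selector and AAI mode.
 */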
static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
	}
}

/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	uint32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->qp->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (uint64_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, &alginfo_c,
						NULL,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (uint64_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported aead alg\n");
			return -ENOTSUP;
		}
		alginfo.key = (uint64_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (uint64_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (uint64_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (uint64_t)dpaa_mem_vtop(
							(void *)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1<<1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (uint64_t)dpaa_mem_vtop(
							(void *)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;

		/* Auth_only_len is set to 0 here; it will be overwritten
		 * in the fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, &alginfo_c, &alginfo_a,
				ses->iv.length, 0,
				ses->digest_length, ses->dir);
	}
	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

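/*
 * Volatile dequeue: if the FQ holds frames, issue a VDQCR for up to len
 * frames and poll the DQRR until the volatile dequeue command completes.
 */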
static inline unsigned int
dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
{
	unsigned int pkts = 0;
	int ret;
	struct qm_mcr_queryfq_np np;
	enum qman_fq_state state;
	uint32_t flags;
	uint32_t vdqcr;

	qman_query_fq_np(fq, &np);
	if (np.frm_cnt) {
		vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
		if (exact)
			vdqcr |= QM_VDQCR_EXACT;
		ret = qman_volatile_dequeue(fq, 0, vdqcr);
		if (ret)
			return 0;
		do {
			pkts += qman_poll_dqrr(len);
			qman_fq_state(fq, &state, &flags);
		} while (flags & QMAN_FQ_STATE_VDQCR);
	}
	return pkts;
}

/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;

	fq = &qp->outq;
	dpaa_sec_op_nb = 0;
	dpaa_sec_ops = ops;

	if (unlikely(nb_ops > DPAA_SEC_BURST))
		nb_ops = DPAA_SEC_BURST;

	return dpaa_volatile_deq(fq, nb_ops, 1);
}

/**
 * packet looks like:
 *		|<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *		|
 *	   mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t start_addr;
	uint8_t *old_digest;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_mtophys(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		sg->extension = 1;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);

		sg = &cf->sg[2];
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		cpu_to_hw_sg(sg);

		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}

	return cf;
}

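/*
 * Build a cipher-only job: sg[0] (output) points at the payload area in
 * the mbuf, while sg[1] (input) extends to a two-entry table holding the
 * IV followed by the payload.
 */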
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	start_addr = rte_pktmbuf_mtophys(mbuf);

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

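/*
 * Build an AEAD (AES-GCM) job. The input compound frame chains the IV, the
 * AAD (if any) and the payload; on decrypt, the received digest is saved in
 * the ctx and appended so the hardware can verify it. The output chains the
 * payload and, on encrypt, the generated digest.
 */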
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	start_addr = mbuf->buf_physaddr + mbuf->data_off;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

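/*
 * Build a chained cipher+auth job. The input compound frame chains the IV
 * and the auth region (plus the saved digest on decrypt); the output chains
 * the cipher region and, on encrypt, the generated digest.
 */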
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	start_addr = mbuf->buf_physaddr + mbuf->data_off;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

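/*
 * Build the job for one crypto op and enqueue it to the queue pair's input
 * FQ as a compound frame descriptor. The CDB is (re)prepared whenever the
 * session bound to the qp changes; auth_only_len is passed to the hardware
 * through fd.cmd.
 */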
static int
dpaa_sec_enqueue_op(struct rte_crypto_op *op, struct dpaa_sec_qp *qp)
{
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	struct qm_fd fd;
	int ret;
	uint32_t auth_only_len = op->sym->auth.data.length -
				op->sym->cipher.data.length;

	ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
					cryptodev_driver_id);

	if (unlikely(!qp->ses || qp->ses != ses)) {
		qp->ses = ses;
		ses->qp = qp;
		ret = dpaa_sec_prep_cdb(ses);
		if (ret)
			return ret;
	}

	/*
	 * Segmented buffer is not supported.
	 */
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -ENOTSUP;
	}
	if (is_auth_only(ses)) {
		cf = build_auth_only(op, ses);
	} else if (is_cipher_only(ses)) {
		cf = build_cipher_only(op, ses);
	} else if (is_aead(ses)) {
		cf = build_cipher_auth_gcm(op, ses);
		auth_only_len = ses->auth_only_len;
	} else if (is_auth_cipher(ses)) {
		cf = build_cipher_auth(op, ses);
	} else {
		PMD_TX_LOG(ERR, "not supported sec op");
		return -ENOTSUP;
	}
	if (unlikely(!cf))
		return -ENOMEM;

	memset(&fd, 0, sizeof(struct qm_fd));
	qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
	fd._format1 = qm_fd_compound;
	fd.length29 = 2 * sizeof(struct qm_sg_entry);
	/* Auth_only_len is set to 0 in the descriptor and is overwritten
	 * here in fd.cmd, which will update the DPOVRD register.
	 */
	if (auth_only_len)
		fd.cmd = 0x80000000 | auth_only_len;
	do {
		ret = qman_enqueue(&qp->inq, &fd, 0);
	} while (ret != 0);

	return 0;
}

static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to the given device and queue pair */
	uint32_t loop;
	int32_t ret;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;

	if (unlikely(nb_ops == 0))
		return 0;

	/* Prepare each packet which is to be sent */
	for (loop = 0; loop < nb_ops; loop++) {
		if (ops[loop]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
			PMD_TX_LOG(ERR, "sessionless crypto op not supported");
			return 0;
		}
		ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
		if (!ret)
			num_tx++;
	}
	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);

	return num_rx;
}

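/*
 * Minimal usage sketch (hypothetical application code, not part of this
 * driver): after device and queue pair setup, ops flow through the generic
 * cryptodev burst API, which dispatches to the two functions above:
 *
 *	uint16_t n = rte_cryptodev_enqueue_burst(dev_id, 0, ops, nb_ops);
 *	...
 *	n = rte_cryptodev_dequeue_burst(dev_id, 0, ops, DPAA_SEC_BURST);
 */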
/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		     dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

/** Start queue pair */
static int
dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			 __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}

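/* Fill in the session cipher parameters from the xform and keep a private
 * copy of the key.
 */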
static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

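/* Fill in the session auth parameters (algorithm, key, digest length) from
 * the xform.
 */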
static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for auth key\n");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

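/* Fill in the session AEAD parameters (algorithm, key, IV, AAD and digest
 * lengths) from the xform.
 */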
static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
{
	dpaa_sec_session *sess = ses;
	struct dpaa_sec_qp *qp;

	PMD_INIT_FUNC_TRACE();

	qp = dev->data->queue_pairs[qp_id];
	if (qp->ses != NULL) {
		PMD_INIT_LOG(ERR, "qp in-use by another session\n");
		return -EBUSY;
	}

	qp->ses = sess;
	sess->qp = qp;

	return dpaa_sec_prep_cdb(sess);
}

static int
dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
{
	dpaa_sec_session *sess = ses;
	struct dpaa_sec_qp *qp;

	PMD_INIT_FUNC_TRACE();

	qp = dev->data->queue_pairs[qp_id];
	if (qp->ses != NULL) {
		qp->ses = NULL;
		sess->qp = NULL;
		return 0;
	}

	PMD_DRV_LOG(ERR, "No session attached to qp");
	return -EINVAL;
}

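/*
 * Parse the xform chain and fill in the session: cipher-only, auth-only,
 * a cipher/auth chain (only cipher-then-auth for encryption and
 * auth-then-cipher for decryption are accepted), or a single AEAD xform.
 */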
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct\n");
		return -EINVAL;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		PMD_DRV_LOG(ERR, "Invalid crypto type");
		return -EINVAL;
	}
	session->ctx_pool = internals->ctx_pool;

	return 0;
}

static int
dpaa_sec_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
				"session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->sym.max_nb_sessions_per_qp =
			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS / RTE_MAX_NB_SEC_QPS;
		info->driver_id = cryptodev_driver_id;
	}
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure	      = dpaa_sec_dev_configure,
	.dev_start	      = dpaa_sec_dev_start,
	.dev_stop	      = dpaa_sec_dev_stop,
	.dev_close	      = dpaa_sec_dev_close,
	.dev_infos_get        = dpaa_sec_dev_infos_get,
	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
	.queue_pair_release   = dpaa_sec_queue_pair_release,
	.queue_pair_start     = dpaa_sec_queue_pair_start,
	.queue_pair_stop      = dpaa_sec_queue_pair_stop,
	.queue_pair_count     = dpaa_sec_queue_pair_count,
	.session_get_size     = dpaa_sec_session_get_size,
	.session_configure    = dpaa_sec_session_configure,
	.session_clear        = dpaa_sec_session_clear,
	.qp_attach_session    = dpaa_sec_qp_attach_sess,
	.qp_detach_session    = dpaa_sec_qp_detach_sess,
};

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_mempool_free(internals->ctx_pool);
	rte_free(internals);

	PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}

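/*
 * Per-device initialization: hook up the ops and burst functions, create
 * the input/output frame queues for every queue pair, and allocate the
 * per-device op context pool.
 */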
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp;
	uint32_t i;
	int ret;
	char str[20];

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
			goto init_error;
		}
		ret = dpaa_sec_init_rx(&qp->inq, dpaa_mem_vtop(&qp->cdb),
				       qman_fq_fqid(&qp->outq));
		if (ret) {
			PMD_INIT_LOG(ERR, "config rx of queue pair %d", i);
			goto init_error;
		}
	}

	sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id);
	internals->ctx_pool = rte_mempool_create((const char *)str,
			CTX_POOL_NUM_BUFS,
			CTX_POOL_BUF_SIZE,
			CTX_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->ctx_pool) {
		RTE_LOG(ERR, PMD, "%s create failed\n", str);
		goto init_error;
	}

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}

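/*
 * DPAA bus probe: allocate and wire up the cryptodev, read the SEC era from
 * the device tree if it has not been configured yet, then run device init;
 * on failure, release everything that was allocated.
 */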
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
				struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;
	cryptodev->device->driver = &dpaa_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	/* free crypto device */
	rte_cryptodev_pmd_release_device(cryptodev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	PMD_INIT_LOG(INFO, "Closing dpaa crypto device %s",
		     cryptodev->data->name);

	cryptodev->device = NULL;
	cryptodev->data = NULL;

	return 0;
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
		cryptodev_driver_id);