/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include <rte_alarm.h>
#include <rte_bus_pci.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>

#include "otx_cryptodev.h"
#include "otx_cryptodev_capabilities.h"
#include "otx_cryptodev_hw_access.h"
#include "otx_cryptodev_mbox.h"
#include "otx_cryptodev_ops.h"

#include "cpt_pmd_logs.h"
#include "cpt_pmd_ops_helper.h"
#include "cpt_ucode.h"
#include "cpt_ucode_asym.h"

static uint64_t otx_fpm_iova[CPT_EC_ID_PMAX];

/* Forward declarations */

static int
otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id);

/* Alarm routines */

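/*
 * Periodic alarm callback. Polls the VF for pending misc (mailbox/error)
 * interrupts and re-arms itself so polling continues every
 * CPT_INTR_POLL_INTERVAL_MS milliseconds.
 */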
static void
otx_cpt_alarm_cb(void *arg)
{
	struct cpt_vf *cptvf = arg;
	otx_cpt_poll_misc(cptvf);
	rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
			  otx_cpt_alarm_cb, cptvf);
}

static int
otx_cpt_periodic_alarm_start(void *arg)
{
	return rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
				 otx_cpt_alarm_cb, arg);
}

static int
otx_cpt_periodic_alarm_stop(void *arg)
{
	return rte_eal_alarm_cancel(otx_cpt_alarm_cb, arg);
}

/* PMD ops */

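/*
 * Device configuration. For VFs exposing asymmetric crypto, initialize the
 * shared FPM table that is later handed to the ECDSA enqueue path.
 */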
static int
otx_cpt_dev_config(struct rte_cryptodev *dev,
		   struct rte_cryptodev_config *config __rte_unused)
{
	int ret = 0;

	CPT_PMD_INIT_FUNC_TRACE();

	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
		/* Initialize shared FPM table */
		ret = cpt_fpm_init(otx_fpm_iova);

	return ret;
}

static int
otx_cpt_dev_start(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;

	CPT_PMD_INIT_FUNC_TRACE();

	return otx_cpt_start_device(cptvf);
}

static void
otx_cpt_dev_stop(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;

	CPT_PMD_INIT_FUNC_TRACE();

	if (c_dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
		cpt_fpm_clear();

	otx_cpt_stop_device(cptvf);
}

static int
otx_cpt_dev_close(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;
	int i, ret;

	CPT_PMD_INIT_FUNC_TRACE();

	for (i = 0; i < c_dev->data->nb_queue_pairs; i++) {
		ret = otx_cpt_que_pair_release(c_dev, i);
		if (ret)
			return ret;
	}

	otx_cpt_periodic_alarm_stop(cptvf);
	otx_cpt_deinit_device(cptvf);

	return 0;
}

static void
otx_cpt_dev_info_get(struct rte_cryptodev *dev, struct rte_cryptodev_info *info)
{
	CPT_PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = CPT_NUM_QS_PER_VF;
		info->feature_flags = dev->feature_flags;
		info->capabilities = otx_get_capabilities(info->feature_flags);
		info->sym.max_nb_sessions = 0;
		info->driver_id = otx_cryptodev_driver_id;
		info->min_mbuf_headroom_req = OTX_CPT_MIN_HEADROOM_REQ;
		info->min_mbuf_tailroom_req = OTX_CPT_MIN_TAILROOM_REQ;
	}
}

static void
otx_cpt_stats_get(struct rte_cryptodev *dev __rte_unused,
		  struct rte_cryptodev_stats *stats __rte_unused)
{
	CPT_PMD_INIT_FUNC_TRACE();
}

static void
otx_cpt_stats_reset(struct rte_cryptodev *dev __rte_unused)
{
	CPT_PMD_INIT_FUNC_TRACE();
}

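/*
 * Queue pair setup. Releases any previously configured queue pair, warns if
 * the requested descriptor count exceeds DEFAULT_CMD_QLEN, then acquires a
 * CPT instance for this queue and records the session mempools used by the
 * data path.
 */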
static int
otx_cpt_que_pair_setup(struct rte_cryptodev *dev,
		       uint16_t que_pair_id,
		       const struct rte_cryptodev_qp_conf *qp_conf,
		       int socket_id __rte_unused)
{
	struct cpt_instance *instance = NULL;
	struct rte_pci_device *pci_dev;
	int ret = -1;

	CPT_PMD_INIT_FUNC_TRACE();

	if (dev->data->queue_pairs[que_pair_id] != NULL) {
		ret = otx_cpt_que_pair_release(dev, que_pair_id);
		if (ret)
			return ret;
	}

	if (qp_conf->nb_descriptors > DEFAULT_CMD_QLEN) {
		CPT_LOG_INFO("Number of descriptors too big %d, using default "
			     "queue length of %d", qp_conf->nb_descriptors,
			     DEFAULT_CMD_QLEN);
	}

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	if (pci_dev->mem_resource[0].addr == NULL) {
		CPT_LOG_ERR("PCI mem address null");
		return -EIO;
	}

	ret = otx_cpt_get_resource(dev, 0, &instance, que_pair_id);
	if (ret != 0 || instance == NULL) {
		CPT_LOG_ERR("Error getting instance handle from device %s : "
			    "ret = %d", dev->data->name, ret);
		return ret;
	}

	instance->queue_id = que_pair_id;
	instance->sess_mp = qp_conf->mp_session;
	instance->sess_mp_priv = qp_conf->mp_session_private;
	dev->data->queue_pairs[que_pair_id] = instance;

	return 0;
}

static int
otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id)
{
	struct cpt_instance *instance = dev->data->queue_pairs[que_pair_id];
	int ret;

	CPT_PMD_INIT_FUNC_TRACE();

	ret = otx_cpt_put_resource(instance);
	if (ret != 0) {
		CPT_LOG_ERR("Error putting instance handle of device %s : "
			    "ret = %d", dev->data->name, ret);
		return ret;
	}

	dev->data->queue_pairs[que_pair_id] = NULL;

	return 0;
}

static unsigned int
otx_cpt_get_session_size(struct rte_cryptodev *dev __rte_unused)
{
	return cpt_get_session_size();
}

static void
otx_cpt_session_init(void *sym_sess, uint8_t driver_id)
{
	struct rte_cryptodev_sym_session *sess = sym_sess;
	struct cpt_sess_misc *cpt_sess =
	 (struct cpt_sess_misc *) get_sym_session_private_data(sess, driver_id);

	CPT_PMD_INIT_FUNC_TRACE();
	cpt_sess->ctx_dma_addr = rte_mempool_virt2iova(cpt_sess) +
			sizeof(struct cpt_sess_misc);
}

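/*
 * Symmetric session configure. Allocates session private data from the
 * given mempool and walks the xform chain, filling AEAD, cipher or auth
 * (including AES-GMAC) parameters before attaching the private data to the
 * session and caching its IOVA.
 */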
static int
otx_cpt_session_cfg(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    struct rte_cryptodev_sym_session *sess,
		    struct rte_mempool *mempool)
{
	struct rte_crypto_sym_xform *chain;
	void *sess_private_data = NULL;

	CPT_PMD_INIT_FUNC_TRACE();

	if (cpt_is_algo_supported(xform))
		goto err;

	if (unlikely(sess == NULL)) {
		CPT_LOG_ERR("invalid session struct");
		return -EINVAL;
	}

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CPT_LOG_ERR("Could not allocate sess_private_data");
		return -ENOMEM;
	}

	chain = xform;
	while (chain) {
		switch (chain->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			if (fill_sess_aead(chain, sess_private_data))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_CIPHER:
			if (fill_sess_cipher(chain, sess_private_data))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_AUTH:
			if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
				if (fill_sess_gmac(chain, sess_private_data))
					goto err;
			} else {
				if (fill_sess_auth(chain, sess_private_data))
					goto err;
			}
			break;
		default:
			CPT_LOG_ERR("Invalid crypto xform type");
			break;
		}
		chain = chain->next;
	}
	set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
	otx_cpt_session_init(sess, dev->driver_id);
	return 0;

err:
	if (sess_private_data)
		rte_mempool_put(mempool, sess_private_data);
	return -EPERM;
}

static void
otx_cpt_session_clear(struct rte_cryptodev *dev,
		  struct rte_cryptodev_sym_session *sess)
{
	void *sess_priv = get_sym_session_private_data(sess, dev->driver_id);

	CPT_PMD_INIT_FUNC_TRACE();
	if (sess_priv) {
		memset(sess_priv, 0, otx_cpt_get_session_size(dev));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
		set_sym_session_private_data(sess, dev->driver_id, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

static unsigned int
otx_cpt_asym_session_size_get(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct cpt_asym_sess_misc);
}

static int
otx_cpt_asym_session_cfg(struct rte_cryptodev *dev,
			 struct rte_crypto_asym_xform *xform __rte_unused,
			 struct rte_cryptodev_asym_session *sess,
			 struct rte_mempool *pool)
{
	struct cpt_asym_sess_misc *priv;
	int ret;

	CPT_PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(pool, (void **)&priv)) {
		CPT_LOG_ERR("Could not allocate session private data");
		return -ENOMEM;
	}

	memset(priv, 0, sizeof(struct cpt_asym_sess_misc));

	ret = cpt_fill_asym_session_parameters(priv, xform);
	if (ret) {
		CPT_LOG_ERR("Could not configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(pool, priv);
		return ret;
	}

	set_asym_session_private_data(sess, dev->driver_id, priv);
	return 0;
}

static void
otx_cpt_asym_session_clear(struct rte_cryptodev *dev,
			   struct rte_cryptodev_asym_session *sess)
{
	struct cpt_asym_sess_misc *priv;
	struct rte_mempool *sess_mp;

	CPT_PMD_INIT_FUNC_TRACE();

	priv = get_asym_session_private_data(sess, dev->driver_id);

	if (priv == NULL)
		return;

	/* Free resources allocated during session configure */
	cpt_free_asym_session_parameters(priv);
	memset(priv, 0, otx_cpt_asym_session_size_get(dev));
	sess_mp = rte_mempool_from_obj(priv);
	set_asym_session_private_data(sess, dev->driver_id, NULL);
	rte_mempool_put(sess_mp, priv);
}

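/*
 * Submit a prepared CPT instruction and track it in the software pending
 * queue. The queue holds at most DEFAULT_CMD_QLEN outstanding requests;
 * the hardware doorbell is rung later, in bulk, by the burst enqueue.
 */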
static __rte_always_inline int32_t __rte_hot
otx_cpt_request_enqueue(struct cpt_instance *instance,
			struct pending_queue *pqueue,
			void *req)
{
	struct cpt_request_info *user_req = (struct cpt_request_info *)req;

	if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
		return -EAGAIN;

	fill_cpt_inst(instance, req);

	CPT_LOG_DP_DEBUG("req: %p op: %p ", req, user_req->op);

	/* Fill time_out cycles */
	user_req->time_out = rte_get_timer_cycles() +
			DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
	user_req->extra_time = 0;

	/* Default mode of software queue */
	mark_cpt_inst(instance);

	pqueue->rid_queue[pqueue->enq_tail].rid = (uintptr_t)user_req;

	/* We will use soft queue length here to limit requests */
	MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
	pqueue->pending_count += 1;

	CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
			 "op: %p", user_req, user_req->op);
	return 0;
}

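/*
 * Enqueue a single asymmetric op. The meta buffer is laid out as four
 * uintptr_t words (meta buffer pointer, op pointer, two zero words) read
 * back as rsp[0]/rsp[1] at dequeue time, followed by the cpt_request_info
 * and scratch space used to build the request.
 */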
static __rte_always_inline int __rte_hot
otx_cpt_enq_single_asym(struct cpt_instance *instance,
			struct rte_crypto_op *op,
			struct pending_queue *pqueue)
{
	struct cpt_qp_meta_info *minfo = &instance->meta_info;
	struct rte_crypto_asym_op *asym_op = op->asym;
	struct asym_op_params params = {0};
	struct cpt_asym_sess_misc *sess;
	uintptr_t *cop;
	void *mdata;
	int ret;

	if (unlikely(rte_mempool_get(minfo->pool, &mdata) < 0)) {
		CPT_LOG_DP_ERR("Could not allocate meta buffer for request");
		return -ENOMEM;
	}

	sess = get_asym_session_private_data(asym_op->session,
					     otx_cryptodev_driver_id);

	/* Store phys_addr of the mdata to meta_buf */
	params.meta_buf = rte_mempool_virt2iova(mdata);

	cop = mdata;
	cop[0] = (uintptr_t)mdata;
	cop[1] = (uintptr_t)op;
	cop[2] = cop[3] = 0ULL;

	params.req = RTE_PTR_ADD(cop, 4 * sizeof(uintptr_t));
	params.req->op = cop;

	/* Adjust meta_buf by crypto_op data and request_info struct */
	params.meta_buf += (4 * sizeof(uintptr_t)) +
			   sizeof(struct cpt_request_info);

	switch (sess->xfrm_type) {
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		ret = cpt_modex_prep(&params, &sess->mod_ctx);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		ret = cpt_enqueue_rsa_op(op, &params, sess);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
		ret = cpt_enqueue_ecdsa_op(op, &params, sess, otx_fpm_iova);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECPM:
		ret = cpt_ecpm_prep(&asym_op->ecpm, &params,
				    sess->ec_ctx.curveid);
		if (unlikely(ret))
			goto req_fail;
		break;

	default:
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		ret = -EINVAL;
		goto req_fail;
	}

	ret = otx_cpt_request_enqueue(instance, pqueue, params.req);

	if (unlikely(ret)) {
		CPT_LOG_DP_ERR("Could not enqueue crypto req");
		goto req_fail;
	}

	return 0;

req_fail:
	free_op_meta(mdata, minfo->pool);

	return ret;
}

static __rte_always_inline int __rte_hot
otx_cpt_enq_single_sym(struct cpt_instance *instance,
		       struct rte_crypto_op *op,
		       struct pending_queue *pqueue)
{
	struct cpt_sess_misc *sess;
	struct rte_crypto_sym_op *sym_op = op->sym;
	void *prep_req, *mdata = NULL;
	int ret = 0;
	uint64_t cpt_op;

	sess = (struct cpt_sess_misc *)
			get_sym_session_private_data(sym_op->session,
						     otx_cryptodev_driver_id);

	cpt_op = sess->cpt_op;

	if (likely(cpt_op & CPT_OP_CIPHER_MASK))
		ret = fill_fc_params(op, sess, &instance->meta_info, &mdata,
				     &prep_req);
	else
		ret = fill_digest_params(op, sess, &instance->meta_info,
					 &mdata, &prep_req);

	if (unlikely(ret)) {
		CPT_LOG_DP_ERR("prep crypto req : op %p, cpt_op 0x%x "
			       "ret 0x%x", op, (unsigned int)cpt_op, ret);
		return ret;
	}

	/* Enqueue prepared instruction to h/w */
	ret = otx_cpt_request_enqueue(instance, pqueue, prep_req);

	if (unlikely(ret)) {
		/* Buffer allocated for request preparation needs to be freed */
		free_op_meta(mdata, instance->meta_info.pool);
		return ret;
	}

	return 0;
}

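/*
 * Session-less symmetric enqueue. Builds a temporary session from the op's
 * xform chain using the queue pair's session mempools, attaches it to the
 * op and enqueues it; the temporary session is released on dequeue.
 */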
static __rte_always_inline int __rte_hot
otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
				struct rte_crypto_op *op,
				struct pending_queue *pqueue)
{
	struct cpt_sess_misc *sess;
	struct rte_crypto_sym_op *sym_op = op->sym;
	int ret;
	void *sess_t = NULL;
	void *sess_private_data_t = NULL;

	/* Create tmp session */

	if (rte_mempool_get(instance->sess_mp, (void **)&sess_t)) {
		ret = -ENOMEM;
		goto exit;
	}

	if (rte_mempool_get(instance->sess_mp_priv,
			(void **)&sess_private_data_t)) {
		ret = -ENOMEM;
		goto free_sess;
	}

	sess = (struct cpt_sess_misc *)sess_private_data_t;

	sess->ctx_dma_addr = rte_mempool_virt2iova(sess) +
			sizeof(struct cpt_sess_misc);

	ret = instance_session_cfg(sym_op->xform, (void *)sess);
	if (unlikely(ret)) {
		ret = -EINVAL;
		goto free_sess_priv;
	}

	/* Save tmp session in op */

	sym_op->session = (struct rte_cryptodev_sym_session *)sess_t;
	set_sym_session_private_data(sym_op->session, otx_cryptodev_driver_id,
				     sess_private_data_t);

	/* Enqueue op with the tmp session set */
	ret = otx_cpt_enq_single_sym(instance, op, pqueue);

	if (unlikely(ret))
		goto free_sess_priv;

	return 0;

free_sess_priv:
	rte_mempool_put(instance->sess_mp_priv, sess_private_data_t);
free_sess:
	rte_mempool_put(instance->sess_mp, sess_t);
exit:
	return ret;
}

#define OP_TYPE_SYM		0
#define OP_TYPE_ASYM		1

static __rte_always_inline int __rte_hot
otx_cpt_enq_single(struct cpt_instance *inst,
		   struct rte_crypto_op *op,
		   struct pending_queue *pqueue,
		   const uint8_t op_type)
{
	/* Check for the type */

	if (op_type == OP_TYPE_SYM) {
		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
			return otx_cpt_enq_single_sym(inst, op, pqueue);
		else
			return otx_cpt_enq_single_sym_sessless(inst, op,
							       pqueue);
	}

	if (op_type == OP_TYPE_ASYM) {
		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
			return otx_cpt_enq_single_asym(inst, op, pqueue);
	}

	/* Should not reach here */
	return -ENOTSUP;
}

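/*
 * Common burst enqueue. The burst is first clamped to the free space left
 * in the pending queue; ops are then submitted one by one and the doorbell
 * is rung once for everything that was accepted.
 */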
static __rte_always_inline uint16_t __rte_hot
otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
		    const uint8_t op_type)
{
	struct cpt_instance *instance = (struct cpt_instance *)qptr;
	uint16_t count;
	int ret;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct pending_queue *pqueue = &cptvf->pqueue;

	count = DEFAULT_CMD_QLEN - pqueue->pending_count;
	if (nb_ops > count)
		nb_ops = count;

	count = 0;
	while (likely(count < nb_ops)) {

		/* Enqueue single op */
		ret = otx_cpt_enq_single(instance, ops[count], pqueue, op_type);

		if (unlikely(ret))
			break;
		count++;
	}
	otx_cpt_ring_dbell(instance, count);
	return count;
}

static uint16_t
otx_cpt_enqueue_asym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return otx_cpt_pkt_enqueue(qptr, ops, nb_ops, OP_TYPE_ASYM);
}

static uint16_t
otx_cpt_enqueue_sym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return otx_cpt_pkt_enqueue(qptr, ops, nb_ops, OP_TYPE_SYM);
}

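/*
 * Copy RSA results from the hardware response buffer into the crypto op.
 * For padded decrypt/verify the response is prefixed with a 16-bit length
 * word; verify additionally compares the recovered data with the message.
 */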
static inline void
otx_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req,
		    struct rte_crypto_rsa_xform *rsa_ctx)
{
	struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa;

	switch (rsa->op_type) {
	case RTE_CRYPTO_ASYM_OP_ENCRYPT:
		rsa->cipher.length = rsa_ctx->n.length;
		memcpy(rsa->cipher.data, req->rptr, rsa->cipher.length);
		break;
	case RTE_CRYPTO_ASYM_OP_DECRYPT:
		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE)
			rsa->message.length = rsa_ctx->n.length;
		else {
			/* Get length of decrypted output */
			rsa->message.length = rte_cpu_to_be_16
					(*((uint16_t *)req->rptr));

			/* Offset data pointer by length fields */
			req->rptr += 2;
		}
		memcpy(rsa->message.data, req->rptr, rsa->message.length);
		break;
	case RTE_CRYPTO_ASYM_OP_SIGN:
		rsa->sign.length = rsa_ctx->n.length;
		memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
		break;
	case RTE_CRYPTO_ASYM_OP_VERIFY:
		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE)
			rsa->sign.length = rsa_ctx->n.length;
		else {
			/* Get length of decrypted output */
			rsa->sign.length = rte_cpu_to_be_16
					(*((uint16_t *)req->rptr));

			/* Offset data pointer by length fields */
			req->rptr += 2;
		}
		memcpy(rsa->sign.data, req->rptr, rsa->sign.length);

		if (memcmp(rsa->sign.data, rsa->message.data,
			   rsa->message.length)) {
			CPT_LOG_DP_ERR("RSA verification failed");
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		break;
	default:
		CPT_LOG_DP_DEBUG("Invalid RSA operation type");
		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		break;
	}
}

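/*
 * Copy the ECDSA signature components produced by hardware. r and s are
 * each prime_len bytes long, with s starting at the next 8-byte aligned
 * offset in the response buffer. Nothing is copied for verify ops.
 */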
static __rte_always_inline void
otx_cpt_asym_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa,
			    struct cpt_request_info *req,
			    struct cpt_asym_ec_ctx *ec)
{
	int prime_len = ec_grp[ec->curveid].prime.length;

	if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
		return;

	/* Separate out sign r and s components */
	memcpy(ecdsa->r.data, req->rptr, prime_len);
	memcpy(ecdsa->s.data, req->rptr + ROUNDUP8(prime_len), prime_len);
	ecdsa->r.length = prime_len;
	ecdsa->s.length = prime_len;
}

static __rte_always_inline void
otx_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm,
			     struct cpt_request_info *req,
			     struct cpt_asym_ec_ctx *ec)
{
	int prime_len = ec_grp[ec->curveid].prime.length;

	memcpy(ecpm->r.x.data, req->rptr, prime_len);
	memcpy(ecpm->r.y.data, req->rptr + ROUNDUP8(prime_len), prime_len);
	ecpm->r.x.length = prime_len;
	ecpm->r.y.length = prime_len;
}

static __rte_always_inline void __rte_hot
otx_cpt_asym_post_process(struct rte_crypto_op *cop,
			  struct cpt_request_info *req)
{
	struct rte_crypto_asym_op *op = cop->asym;
	struct cpt_asym_sess_misc *sess;

	sess = get_asym_session_private_data(op->session,
					     otx_cryptodev_driver_id);

	switch (sess->xfrm_type) {
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		otx_cpt_asym_rsa_op(cop, req, &sess->rsa_ctx);
		break;
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		op->modex.result.length = sess->mod_ctx.modulus.length;
		memcpy(op->modex.result.data, req->rptr,
		       op->modex.result.length);
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
		otx_cpt_asym_dequeue_ecdsa_op(&op->ecdsa, req, &sess->ec_ctx);
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECPM:
		otx_cpt_asym_dequeue_ecpm_op(&op->ecpm, req, &sess->ec_ctx);
		break;
	default:
		CPT_LOG_DP_DEBUG("Invalid crypto xform type");
		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		break;
	}
}

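/*
 * Post process a successfully completed op. For symmetric ops, finish any
 * auth verification that was deferred to software (flagged via rsp[2]).
 * For asymmetric ops, the cpt_request_info follows the four-word
 * completion header and is handed to the asym post processing.
 */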
static __rte_always_inline void __rte_hot
otx_cpt_dequeue_post_process(struct rte_crypto_op *cop, uintptr_t *rsp,
			     const uint8_t op_type)
{
	/* H/w has returned success */
	cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Perform further post processing */

	if ((op_type == OP_TYPE_SYM) &&
	    (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
		/* Check if auth verify needs to be completed */
		if (unlikely(rsp[2]))
			compl_auth_verify(cop, (uint8_t *)rsp[2], rsp[3]);
		return;
	}

	if ((op_type == OP_TYPE_ASYM) &&
	    (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC)) {
		rsp = RTE_PTR_ADD(rsp, 4 * sizeof(uintptr_t));
		otx_cpt_asym_post_process(cop, (struct cpt_request_info *)rsp);
	}

	return;
}

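/*
 * Common burst dequeue. Walks the pending queue from deq_head collecting
 * completion codes, then translates each completion into an op status,
 * frees temporary session-less sessions and returns meta buffers to their
 * pool.
 */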
static __rte_always_inline uint16_t __rte_hot
otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
		    const uint8_t op_type)
{
	struct cpt_instance *instance = (struct cpt_instance *)qptr;
	struct cpt_request_info *user_req;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct rid *rid_e;
	uint8_t cc[nb_ops];
	int i, count, pcount;
	uint8_t ret;
	int nb_completed;
	struct pending_queue *pqueue = &cptvf->pqueue;
	struct rte_crypto_op *cop;
	void *metabuf;
	uintptr_t *rsp;

	pcount = pqueue->pending_count;
	count = (nb_ops > pcount) ? pcount : nb_ops;

	for (i = 0; i < count; i++) {
		rid_e = &pqueue->rid_queue[pqueue->deq_head];
		user_req = (struct cpt_request_info *)(rid_e->rid);

		if (likely((i+1) < count))
			rte_prefetch_non_temporal((void *)rid_e[1].rid);

		ret = check_nb_command_id(user_req, instance);

		if (unlikely(ret == ERR_REQ_PENDING)) {
			/* Stop checking for completions */
			break;
		}

		/* Return completion code and op handle */
		cc[i] = ret;
		ops[i] = user_req->op;

		CPT_LOG_DP_DEBUG("Request %p Op %p completed with code %d",
				 user_req, user_req->op, ret);

		MOD_INC(pqueue->deq_head, DEFAULT_CMD_QLEN);
		pqueue->pending_count -= 1;
	}

	nb_completed = i;

	for (i = 0; i < nb_completed; i++) {

		rsp = (void *)ops[i];

		if (likely((i + 1) < nb_completed))
			rte_prefetch0(ops[i+1]);

		metabuf = (void *)rsp[0];
		cop = (void *)rsp[1];

		ops[i] = cop;

		/* Check completion code */

		if (likely(cc[i] == 0)) {
			/* H/w success pkt. Post process */
			otx_cpt_dequeue_post_process(cop, rsp, op_type);
		} else if (cc[i] == ERR_GC_ICV_MISCOMPARE) {
			/* auth data mismatch */
			cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			/* Error */
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}

		if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
			void *sess_private_data_t =
				get_sym_session_private_data(cop->sym->session,
						otx_cryptodev_driver_id);
			memset(sess_private_data_t, 0,
					cpt_get_session_size());
			memset(cop->sym->session, 0,
			rte_cryptodev_sym_get_existing_header_session_size(
					cop->sym->session));
			rte_mempool_put(instance->sess_mp_priv,
					sess_private_data_t);
			rte_mempool_put(instance->sess_mp, cop->sym->session);
			cop->sym->session = NULL;
		}
		free_op_meta(metabuf, instance->meta_info.pool);
	}

	return nb_completed;
}

static uint16_t
otx_cpt_dequeue_asym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return otx_cpt_pkt_dequeue(qptr, ops, nb_ops, OP_TYPE_ASYM);
}

static uint16_t
otx_cpt_dequeue_sym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return otx_cpt_pkt_dequeue(qptr, ops, nb_ops, OP_TYPE_SYM);
}

static struct rte_cryptodev_ops cptvf_ops = {
	/* Device related operations */
	.dev_configure = otx_cpt_dev_config,
	.dev_start = otx_cpt_dev_start,
	.dev_stop = otx_cpt_dev_stop,
	.dev_close = otx_cpt_dev_close,
	.dev_infos_get = otx_cpt_dev_info_get,

	.stats_get = otx_cpt_stats_get,
	.stats_reset = otx_cpt_stats_reset,
	.queue_pair_setup = otx_cpt_que_pair_setup,
	.queue_pair_release = otx_cpt_que_pair_release,

	/* Crypto related operations */
	.sym_session_get_size = otx_cpt_get_session_size,
	.sym_session_configure = otx_cpt_session_cfg,
	.sym_session_clear = otx_cpt_session_clear,

	.asym_session_get_size = otx_cpt_asym_session_size_get,
	.asym_session_configure = otx_cpt_asym_session_cfg,
	.asym_session_clear = otx_cpt_asym_session_clear,
};

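/*
 * Probe-time device creation. Validates BAR0, initializes the VF, sets the
 * feature flags according to the VF type (AE or SE), starts the mailbox
 * poll alarm and installs the matching enqueue/dequeue burst handlers.
 */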
int
otx_cpt_dev_create(struct rte_cryptodev *c_dev)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(c_dev->device);
	struct cpt_vf *cptvf = NULL;
	void *reg_base;
	char dev_name[32];
	int ret;

	if (pdev->mem_resource[0].phys_addr == 0ULL)
		return -EIO;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	cptvf = rte_zmalloc_socket("otx_cryptodev_private_mem",
			sizeof(struct cpt_vf), RTE_CACHE_LINE_SIZE,
			rte_socket_id());

	if (cptvf == NULL) {
		CPT_LOG_ERR("Cannot allocate memory for device private data");
		return -ENOMEM;
	}

	snprintf(dev_name, 32, "%02x:%02x.%x",
			pdev->addr.bus, pdev->addr.devid, pdev->addr.function);

	reg_base = pdev->mem_resource[0].addr;
	if (!reg_base) {
		CPT_LOG_ERR("Failed to map BAR0 of %s", dev_name);
		ret = -ENODEV;
		goto fail;
	}

	ret = otx_cpt_hw_init(cptvf, pdev, reg_base, dev_name);
	if (ret) {
		CPT_LOG_ERR("Failed to init cptvf %s", dev_name);
		ret = -EIO;
		goto fail;
	}

	switch (cptvf->vftype) {
	case OTX_CPT_VF_TYPE_AE:
		/* Set asymmetric cpt feature flags */
		c_dev->feature_flags = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
				RTE_CRYPTODEV_FF_HW_ACCELERATED |
				RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT;
		break;
	case OTX_CPT_VF_TYPE_SE:
		/* Set symmetric cpt feature flags */
		c_dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
				RTE_CRYPTODEV_FF_HW_ACCELERATED |
				RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
				RTE_CRYPTODEV_FF_IN_PLACE_SGL |
				RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
				RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
				RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
				RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
		break;
	default:
		/* Feature not supported. Abort */
		CPT_LOG_ERR("VF type not supported by %s", dev_name);
		ret = -EIO;
		goto deinit_dev;
	}

	/* Start off timer for mailbox interrupts */
	otx_cpt_periodic_alarm_start(cptvf);

	c_dev->dev_ops = &cptvf_ops;

	if (c_dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
		c_dev->enqueue_burst = otx_cpt_enqueue_sym;
		c_dev->dequeue_burst = otx_cpt_dequeue_sym;
	} else {
		c_dev->enqueue_burst = otx_cpt_enqueue_asym;
		c_dev->dequeue_burst = otx_cpt_dequeue_asym;
	}

	/* Save dev private data */
	c_dev->data->dev_private = cptvf;

	return 0;

deinit_dev:
	otx_cpt_deinit_device(cptvf);

fail:
	if (cptvf) {
		/* Free private data allocated */
		rte_free(cptvf);
	}

	return ret;
}