/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include <rte_alarm.h>
#include <rte_bus_pci.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_malloc.h>

#include "cpt_pmd_logs.h"
#include "cpt_pmd_ops_helper.h"
#include "cpt_ucode.h"

#include "otx_cryptodev.h"
#include "otx_cryptodev_capabilities.h"
#include "otx_cryptodev_hw_access.h"
#include "otx_cryptodev_ops.h"

static int otx_cryptodev_probe_count;
static rte_spinlock_t otx_probe_count_lock = RTE_SPINLOCK_INITIALIZER;

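/* Global metabuf pool and per-op metadata lengths shared by all CPT VFs;
 * allocated on the first device probe and reference-counted via
 * otx_cryptodev_probe_count.
 */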
static struct rte_mempool *otx_cpt_meta_pool;
static int otx_cpt_op_mlen;
static int otx_cpt_op_sb_mlen;

/* Forward declarations */

static int
otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id);

/*
 * Initialize global resources used by the fast-path code
 *
 * @return
 *   - 0 on success, negative errno on error
 */
static int
init_global_resources(void)
{
	/* Get meta len for scatter gather mode */
	otx_cpt_op_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();

	/* Extra 4 words (32B) reserved for future considerations */
	otx_cpt_op_mlen += 4 * sizeof(uint64_t);

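	/* One pool of 4096 * 16 metabufs sized for SG-mode metadata, with a
	 * per-lcore cache of 512 objects; constructors are NULL since the
	 * metabufs are plain scratch buffers.
	 */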
	otx_cpt_meta_pool = rte_mempool_create("cpt_metabuf-pool", 4096 * 16,
					       otx_cpt_op_mlen, 512, 0,
					       NULL, NULL, NULL, NULL,
					       SOCKET_ID_ANY, 0);
	if (!otx_cpt_meta_pool) {
		CPT_LOG_ERR("cpt metabuf pool not created");
		return -ENOMEM;
	}

	/* Get meta len for direct mode */
	otx_cpt_op_sb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();

	/* Extra 4 words (32B) reserved for future considerations */
	otx_cpt_op_sb_mlen += 4 * sizeof(uint64_t);

	return 0;
}

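/* Counterpart of init_global_resources(); the metabuf pool is freed only
 * when the last probed device is being cleaned up.
 */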
void
cleanup_global_resources(void)
{
	/* Take lock */
	rte_spinlock_lock(&otx_probe_count_lock);

	/* Decrement the cryptodev count */
	otx_cryptodev_probe_count--;

	/* Free buffers */
	if (otx_cpt_meta_pool && otx_cryptodev_probe_count == 0)
		rte_mempool_free(otx_cpt_meta_pool);

	/* Release lock */
	rte_spinlock_unlock(&otx_probe_count_lock);
}

/* Alarm routines */

static void
otx_cpt_alarm_cb(void *arg)
{
	struct cpt_vf *cptvf = arg;

	otx_cpt_poll_misc(cptvf);
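	/* rte_eal_alarm_set() takes microseconds; re-arm the alarm for the
	 * next CPT_INTR_POLL_INTERVAL_MS window.
	 */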
	rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
			  otx_cpt_alarm_cb, cptvf);
}

static int
otx_cpt_periodic_alarm_start(void *arg)
{
	return rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
				 otx_cpt_alarm_cb, arg);
}

static int
otx_cpt_periodic_alarm_stop(void *arg)
{
	return rte_eal_alarm_cancel(otx_cpt_alarm_cb, arg);
}

/* PMD ops */

static int
otx_cpt_dev_config(struct rte_cryptodev *dev __rte_unused,
		   struct rte_cryptodev_config *config __rte_unused)
{
	CPT_PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
otx_cpt_dev_start(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;

	CPT_PMD_INIT_FUNC_TRACE();

	return otx_cpt_start_device(cptvf);
}

static void
otx_cpt_dev_stop(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;

	CPT_PMD_INIT_FUNC_TRACE();

	otx_cpt_stop_device(cptvf);
}

static int
otx_cpt_dev_close(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;
	int i, ret;

	CPT_PMD_INIT_FUNC_TRACE();

	for (i = 0; i < c_dev->data->nb_queue_pairs; i++) {
		ret = otx_cpt_que_pair_release(c_dev, i);
		if (ret)
			return ret;
	}

	otx_cpt_periodic_alarm_stop(cptvf);
	otx_cpt_deinit_device(cptvf);

	return 0;
}

static void
otx_cpt_dev_info_get(struct rte_cryptodev *dev, struct rte_cryptodev_info *info)
{
	CPT_PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = CPT_NUM_QS_PER_VF;
		info->feature_flags = dev->feature_flags;
		info->capabilities = otx_get_capabilities();
		info->sym.max_nb_sessions = 0;
		info->driver_id = otx_cryptodev_driver_id;
		info->min_mbuf_headroom_req = OTX_CPT_MIN_HEADROOM_REQ;
		info->min_mbuf_tailroom_req = OTX_CPT_MIN_TAILROOM_REQ;
	}
}

static void
otx_cpt_stats_get(struct rte_cryptodev *dev __rte_unused,
		  struct rte_cryptodev_stats *stats __rte_unused)
{
	CPT_PMD_INIT_FUNC_TRACE();
}

static void
otx_cpt_stats_reset(struct rte_cryptodev *dev __rte_unused)
{
	CPT_PMD_INIT_FUNC_TRACE();
}

static int
otx_cpt_que_pair_setup(struct rte_cryptodev *dev,
		       uint16_t que_pair_id,
		       const struct rte_cryptodev_qp_conf *qp_conf,
		       int socket_id __rte_unused)
{
	void *cptvf = dev->data->dev_private;
	struct cpt_instance *instance = NULL;
	struct rte_pci_device *pci_dev;
	int ret = -1;

	CPT_PMD_INIT_FUNC_TRACE();

	if (dev->data->queue_pairs[que_pair_id] != NULL) {
		ret = otx_cpt_que_pair_release(dev, que_pair_id);
		if (ret)
			return ret;
	}

	if (qp_conf->nb_descriptors > DEFAULT_CMD_QLEN) {
		CPT_LOG_INFO("Number of descriptors too large (%d), using "
			     "default queue length of %d",
			     qp_conf->nb_descriptors, DEFAULT_CMD_QLEN);
	}

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	if (pci_dev->mem_resource[0].addr == NULL) {
		CPT_LOG_ERR("PCI mem address null");
		return -EIO;
	}

	ret = otx_cpt_get_resource(cptvf, 0, &instance);
	if (ret != 0 || instance == NULL) {
		CPT_LOG_ERR("Error getting instance handle from device %s : "
			    "ret = %d", dev->data->name, ret);
		return ret;
	}

	instance->queue_id = que_pair_id;
	instance->sess_mp = qp_conf->mp_session;
	instance->sess_mp_priv = qp_conf->mp_session_private;
	dev->data->queue_pairs[que_pair_id] = instance;

	return 0;
}

static int
otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id)
{
	struct cpt_instance *instance = dev->data->queue_pairs[que_pair_id];
	int ret;

	CPT_PMD_INIT_FUNC_TRACE();

	ret = otx_cpt_put_resource(instance);
	if (ret != 0) {
		CPT_LOG_ERR("Error putting instance handle of device %s : "
			    "ret = %d", dev->data->name, ret);
		return ret;
	}

	dev->data->queue_pairs[que_pair_id] = NULL;

	return 0;
}

static unsigned int
otx_cpt_get_session_size(struct rte_cryptodev *dev __rte_unused)
{
	return cpt_get_session_size();
}

static void
otx_cpt_session_init(void *sym_sess, uint8_t driver_id)
{
	struct rte_cryptodev_sym_session *sess = sym_sess;
	struct cpt_sess_misc *cpt_sess = (struct cpt_sess_misc *)
		get_sym_session_private_data(sess, driver_id);

	CPT_PMD_INIT_FUNC_TRACE();
	cpt_sess->ctx_dma_addr = rte_mempool_virt2iova(cpt_sess) +
			sizeof(struct cpt_sess_misc);
}

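/* Configure a symmetric session from a (possibly chained) xform list,
 * e.g. CIPHER -> AUTH for encrypt-then-MAC; each element fills its part
 * of the session private data.
 */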
static int
otx_cpt_session_cfg(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    struct rte_cryptodev_sym_session *sess,
		    struct rte_mempool *mempool)
{
	struct rte_crypto_sym_xform *chain;
	void *sess_private_data = NULL;

	CPT_PMD_INIT_FUNC_TRACE();

	if (cpt_is_algo_supported(xform))
		goto err;

	if (unlikely(sess == NULL)) {
		CPT_LOG_ERR("invalid session struct");
		return -EINVAL;
	}

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CPT_LOG_ERR("Could not allocate sess_private_data");
		return -ENOMEM;
	}

	chain = xform;
	while (chain) {
		switch (chain->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			if (fill_sess_aead(chain, sess_private_data))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_CIPHER:
			if (fill_sess_cipher(chain, sess_private_data))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_AUTH:
			if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
				if (fill_sess_gmac(chain, sess_private_data))
					goto err;
			} else {
				if (fill_sess_auth(chain, sess_private_data))
					goto err;
			}
			break;
		default:
			CPT_LOG_ERR("Invalid crypto xform type");
			break;
		}
		chain = chain->next;
	}
	set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
	otx_cpt_session_init(sess, dev->driver_id);
	return 0;

err:
	if (sess_private_data)
		rte_mempool_put(mempool, sess_private_data);
	return -EPERM;
}

static void
otx_cpt_session_clear(struct rte_cryptodev *dev,
		      struct rte_cryptodev_sym_session *sess)
{
	void *sess_priv = get_sym_session_private_data(sess, dev->driver_id);

	CPT_PMD_INIT_FUNC_TRACE();
	if (sess_priv) {
		memset(sess_priv, 0, otx_cpt_get_session_size(dev));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
		set_sym_session_private_data(sess, dev->driver_id, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

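/* Common enqueue: fill the CPT instruction for an already prepared request,
 * stamp its timeout and track it in the software pending queue; completions
 * are harvested by the dequeue path.
 */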
static __rte_always_inline int32_t __hot
otx_cpt_request_enqueue(struct cpt_instance *instance,
			struct pending_queue *pqueue,
			void *req)
{
	struct cpt_request_info *user_req = (struct cpt_request_info *)req;

	if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
		return -EAGAIN;

	fill_cpt_inst(instance, req);

	CPT_LOG_DP_DEBUG("req: %p op: %p ", req, user_req->op);

	/* Fill time_out cycles */
	user_req->time_out = rte_get_timer_cycles() +
			DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
	user_req->extra_time = 0;

	/* Default mode of software queue */
	mark_cpt_inst(instance);

	pqueue->rid_queue[pqueue->enq_tail].rid = (uintptr_t)user_req;

	/* We will use soft queue length here to limit requests */
	MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
	pqueue->pending_count += 1;

	CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
			 "op: %p", user_req, user_req->op);
	return 0;
}

static __rte_always_inline int __hot
otx_cpt_enq_single_sym(struct cpt_instance *instance,
		       struct rte_crypto_op *op,
		       struct pending_queue *pqueue)
{
	struct cpt_sess_misc *sess;
	struct rte_crypto_sym_op *sym_op = op->sym;
	void *prep_req, *mdata = NULL;
	int ret = 0;
	uint64_t cpt_op;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;

	sess = (struct cpt_sess_misc *)
			get_sym_session_private_data(sym_op->session,
						     otx_cryptodev_driver_id);

	cpt_op = sess->cpt_op;

	if (likely(cpt_op & CPT_OP_CIPHER_MASK))
		ret = fill_fc_params(op, sess, &cptvf->meta_info, &mdata,
				     &prep_req);
	else
		ret = fill_digest_params(op, sess, &cptvf->meta_info,
					 &mdata, &prep_req);

	if (unlikely(ret)) {
		CPT_LOG_DP_ERR("prep crypto req : op %p, cpt_op 0x%x "
			       "ret 0x%x", op, (unsigned int)cpt_op, ret);
		return ret;
	}

	/* Enqueue prepared instruction to h/w */
	ret = otx_cpt_request_enqueue(instance, pqueue, prep_req);

	if (unlikely(ret)) {
		/* Buffer allocated for request preparation needs to be freed */
		free_op_meta(mdata, cptvf->meta_info.cptvf_meta_pool);
		return ret;
	}

	return 0;
}

static __rte_always_inline int __hot
otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
				struct rte_crypto_op *op,
				struct pending_queue *pqueue)
{
	struct cpt_sess_misc *sess;
	struct rte_crypto_sym_op *sym_op = op->sym;
	int ret;
	void *sess_t = NULL;
	void *sess_private_data_t = NULL;

	/* Create tmp session */

	if (rte_mempool_get(instance->sess_mp, (void **)&sess_t)) {
		ret = -ENOMEM;
		goto exit;
	}

	if (rte_mempool_get(instance->sess_mp_priv,
			(void **)&sess_private_data_t)) {
		ret = -ENOMEM;
		goto free_sess;
	}

	sess = (struct cpt_sess_misc *)sess_private_data_t;

	sess->ctx_dma_addr = rte_mempool_virt2iova(sess) +
			sizeof(struct cpt_sess_misc);

	ret = instance_session_cfg(sym_op->xform, (void *)sess);
	if (unlikely(ret)) {
		ret = -EINVAL;
		goto free_sess_priv;
	}

	/* Save tmp session in op */

	sym_op->session = (struct rte_cryptodev_sym_session *)sess_t;
	set_sym_session_private_data(sym_op->session, otx_cryptodev_driver_id,
				     sess_private_data_t);

	/* Enqueue op with the tmp session set */
	ret = otx_cpt_enq_single_sym(instance, op, pqueue);

	if (unlikely(ret))
		goto free_sess_priv;

	return 0;

free_sess_priv:
	rte_mempool_put(instance->sess_mp_priv, sess_private_data_t);
free_sess:
	rte_mempool_put(instance->sess_mp, sess_t);
exit:
	return ret;
}

static __rte_always_inline int __hot
otx_cpt_enq_single(struct cpt_instance *inst,
		   struct rte_crypto_op *op,
		   struct pending_queue *pqueue)
{
	/* Check the session type */

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		return otx_cpt_enq_single_sym(inst, op, pqueue);
	else if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS))
		return otx_cpt_enq_single_sym_sessless(inst, op, pqueue);

	/* Should not reach here */
	return -EINVAL;
}

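/* Burst enqueue, bounded by the free space in the software pending queue.
 * A minimal application-side sketch (hypothetical dev_id, queue pair 0):
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, 0, ops, nb_ops);
 *	...
 *	uint16_t done = rte_cryptodev_dequeue_burst(dev_id, 0, ops, sent);
 */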
static uint16_t
otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct cpt_instance *instance = (struct cpt_instance *)qptr;
	uint16_t count;
	int ret;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct pending_queue *pqueue = &cptvf->pqueue;

	count = DEFAULT_CMD_QLEN - pqueue->pending_count;
	if (nb_ops > count)
		nb_ops = count;

	count = 0;
	while (likely(count < nb_ops)) {
		/* Enqueue single op */
		ret = otx_cpt_enq_single(instance, ops[count], pqueue);

		if (unlikely(ret))
			break;
		count++;
	}
	otx_cpt_ring_dbell(instance, count);
	return count;
}

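/* rsp[] layout as consumed here: rsp[0] = metabuf, rsp[1] = crypto op,
 * rsp[2]/rsp[3] = auth verification data/length (rsp[2] is 0 when no
 * verification is pending).
 */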
static __rte_always_inline void
otx_cpt_dequeue_post_process(struct rte_crypto_op *cop, uintptr_t *rsp)
{
	/* H/w has returned success */
	cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Perform further post processing */

	if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		/* Check if auth verify needs to be completed */
		if (unlikely(rsp[2]))
			compl_auth_verify(cop, (uint8_t *)rsp[2], rsp[3]);
		return;
	}
}

static uint16_t
otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct cpt_instance *instance = (struct cpt_instance *)qptr;
	struct cpt_request_info *user_req;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct rid *rid_e;
	uint8_t cc[nb_ops];
	int i, count, pcount;
	uint8_t ret;
	int nb_completed;
	struct pending_queue *pqueue = &cptvf->pqueue;
	struct rte_crypto_op *cop;
	void *metabuf;
	uintptr_t *rsp;

	pcount = pqueue->pending_count;
	count = (nb_ops > pcount) ? pcount : nb_ops;

	for (i = 0; i < count; i++) {
		rid_e = &pqueue->rid_queue[pqueue->deq_head];
		user_req = (struct cpt_request_info *)(rid_e->rid);

		if (likely((i + 1) < count))
			rte_prefetch_non_temporal((void *)rid_e[1].rid);

		ret = check_nb_command_id(user_req, instance);

		if (unlikely(ret == ERR_REQ_PENDING)) {
			/* Stop checking for completions */
			break;
		}

		/* Return completion code and op handle */
		cc[i] = ret;
		ops[i] = user_req->op;

		CPT_LOG_DP_DEBUG("Request %p Op %p completed with code %d",
				 user_req, user_req->op, ret);

		MOD_INC(pqueue->deq_head, DEFAULT_CMD_QLEN);
		pqueue->pending_count -= 1;
	}

	nb_completed = i;

	for (i = 0; i < nb_completed; i++) {
		rsp = (void *)ops[i];

		if (likely((i + 1) < nb_completed))
			rte_prefetch0(ops[i + 1]);

		metabuf = (void *)rsp[0];
		cop = (void *)rsp[1];

		ops[i] = cop;

		/* Check completion code */

		if (likely(cc[i] == 0)) {
			/* H/w success pkt. Post process */
			otx_cpt_dequeue_post_process(cop, rsp);
		} else if (cc[i] == ERR_GC_ICV_MISCOMPARE) {
			/* Auth data mismatch */
			cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			/* Error */
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}

		if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
			void *sess_private_data_t =
				get_sym_session_private_data(cop->sym->session,
						otx_cryptodev_driver_id);
			memset(sess_private_data_t, 0,
			       cpt_get_session_size());
			memset(cop->sym->session, 0,
			       rte_cryptodev_sym_get_existing_header_session_size(
					cop->sym->session));
			rte_mempool_put(instance->sess_mp_priv,
					sess_private_data_t);
			rte_mempool_put(instance->sess_mp, cop->sym->session);
			cop->sym->session = NULL;
		}
		free_op_meta(metabuf, cptvf->meta_info.cptvf_meta_pool);
	}

	return nb_completed;
}

static struct rte_cryptodev_ops cptvf_ops = {
	/* Device related operations */
	.dev_configure = otx_cpt_dev_config,
	.dev_start = otx_cpt_dev_start,
	.dev_stop = otx_cpt_dev_stop,
	.dev_close = otx_cpt_dev_close,
	.dev_infos_get = otx_cpt_dev_info_get,

	.stats_get = otx_cpt_stats_get,
	.stats_reset = otx_cpt_stats_reset,
	.queue_pair_setup = otx_cpt_que_pair_setup,
	.queue_pair_release = otx_cpt_que_pair_release,
	.queue_pair_count = NULL,

	/* Crypto related operations */
	.sym_session_get_size = otx_cpt_get_session_size,
	.sym_session_configure = otx_cpt_session_cfg,
	.sym_session_clear = otx_cpt_session_clear
};

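/* Publish the global fast-path parameters into this VF's meta_info. */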
static void
otx_cpt_common_vars_init(struct cpt_vf *cptvf)
{
	cptvf->meta_info.cptvf_meta_pool = otx_cpt_meta_pool;
	cptvf->meta_info.cptvf_op_mlen = otx_cpt_op_mlen;
	cptvf->meta_info.cptvf_op_sb_mlen = otx_cpt_op_sb_mlen;
}

int
otx_cpt_dev_create(struct rte_cryptodev *c_dev)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(c_dev->device);
	struct cpt_vf *cptvf = NULL;
	void *reg_base;
	char dev_name[32];
	int ret;

	if (pdev->mem_resource[0].phys_addr == 0ULL)
		return -EIO;

	/* For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	cptvf = rte_zmalloc_socket("otx_cryptodev_private_mem",
			sizeof(struct cpt_vf), RTE_CACHE_LINE_SIZE,
			rte_socket_id());

	if (cptvf == NULL) {
		CPT_LOG_ERR("Cannot allocate memory for device private data");
		return -ENOMEM;
	}

	snprintf(dev_name, sizeof(dev_name), "%02x:%02x.%x",
			pdev->addr.bus, pdev->addr.devid, pdev->addr.function);

	reg_base = pdev->mem_resource[0].addr;
	if (!reg_base) {
		CPT_LOG_ERR("Failed to map BAR0 of %s", dev_name);
		ret = -ENODEV;
		goto fail;
	}

	ret = otx_cpt_hw_init(cptvf, pdev, reg_base, dev_name);
	if (ret) {
		CPT_LOG_ERR("Failed to init cptvf %s", dev_name);
		ret = -EIO;
		goto fail;
	}

	/* Start periodic alarm to poll for misc/mailbox interrupts */
	otx_cpt_periodic_alarm_start(cptvf);

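	/* The first probe allocates the global resources; later probes only
	 * bump the reference count.
	 */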
	rte_spinlock_lock(&otx_probe_count_lock);
	if (!otx_cryptodev_probe_count) {
		ret = init_global_resources();
		if (ret) {
			rte_spinlock_unlock(&otx_probe_count_lock);
			goto init_fail;
		}
	}
	otx_cryptodev_probe_count++;
	rte_spinlock_unlock(&otx_probe_count_lock);

	/* Initialize data path variables used by common code */
	otx_cpt_common_vars_init(cptvf);

	c_dev->dev_ops = &cptvf_ops;

	c_dev->enqueue_burst = otx_cpt_pkt_enqueue;
	c_dev->dequeue_burst = otx_cpt_pkt_dequeue;

	c_dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT;

	/* Save dev private data */
	c_dev->data->dev_private = cptvf;

	return 0;

init_fail:
	otx_cpt_periodic_alarm_stop(cptvf);
	otx_cpt_deinit_device(cptvf);

fail:
	/* Free private data allocated; rte_free() is a no-op on NULL */
	rte_free(cptvf);

	return ret;
}
745