/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include <rte_alarm.h>
#include <rte_bus_pci.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>

#include "otx_cryptodev.h"
#include "otx_cryptodev_capabilities.h"
#include "otx_cryptodev_hw_access.h"
#include "otx_cryptodev_ops.h"

#include "cpt_pmd_logs.h"
#include "cpt_ucode.h"

/* Forward declarations */

static int
otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id);

/* Alarm routines */

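/*
 * Misc interrupts (e.g. mailbox events) are serviced by polling: the
 * alarm callback processes any pending events and then re-arms itself,
 * yielding a periodic poll every CPT_INTR_POLL_INTERVAL_MS milliseconds
 * (rte_eal_alarm_set() takes microseconds, hence the * 1000).
 */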
static void
otx_cpt_alarm_cb(void *arg)
{
	struct cpt_vf *cptvf = arg;

	otx_cpt_poll_misc(cptvf);
	rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
			  otx_cpt_alarm_cb, cptvf);
}

static int
otx_cpt_periodic_alarm_start(void *arg)
{
	return rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
				 otx_cpt_alarm_cb, arg);
}

static int
otx_cpt_periodic_alarm_stop(void *arg)
{
	return rte_eal_alarm_cancel(otx_cpt_alarm_cb, arg);
}

/* PMD ops */

static int
otx_cpt_dev_config(struct rte_cryptodev *dev __rte_unused,
		   struct rte_cryptodev_config *config __rte_unused)
{
	CPT_PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
otx_cpt_dev_start(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;

	CPT_PMD_INIT_FUNC_TRACE();

	return otx_cpt_start_device(cptvf);
}

static void
otx_cpt_dev_stop(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;

	CPT_PMD_INIT_FUNC_TRACE();

	otx_cpt_stop_device(cptvf);
}

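/*
 * Device close releases every queue pair first, then cancels the periodic
 * poll alarm, and only then de-initialises the VF.
 */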
static int
otx_cpt_dev_close(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;
	int i, ret;

	CPT_PMD_INIT_FUNC_TRACE();

	for (i = 0; i < c_dev->data->nb_queue_pairs; i++) {
		ret = otx_cpt_que_pair_release(c_dev, i);
		if (ret)
			return ret;
	}

	otx_cpt_periodic_alarm_stop(cptvf);
	otx_cpt_deinit_device(cptvf);

	return 0;
}

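/*
 * A max_nb_sessions of 0 advertises that the PMD places no upper bound
 * on the number of symmetric sessions.
 */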
static void
otx_cpt_dev_info_get(struct rte_cryptodev *dev, struct rte_cryptodev_info *info)
{
	CPT_PMD_INIT_FUNC_TRACE();

	if (info != NULL) {
		info->max_nb_queue_pairs = CPT_NUM_QS_PER_VF;
		info->feature_flags = dev->feature_flags;
		info->capabilities = otx_get_capabilities();
		info->sym.max_nb_sessions = 0;
		info->driver_id = otx_cryptodev_driver_id;
		info->min_mbuf_headroom_req = OTX_CPT_MIN_HEADROOM_REQ;
		info->min_mbuf_tailroom_req = OTX_CPT_MIN_TAILROOM_REQ;
	}
}

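/* Stats are not maintained by this PMD; the ops below are placeholders. */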
static void
otx_cpt_stats_get(struct rte_cryptodev *dev __rte_unused,
		  struct rte_cryptodev_stats *stats __rte_unused)
{
	CPT_PMD_INIT_FUNC_TRACE();
}

static void
otx_cpt_stats_reset(struct rte_cryptodev *dev __rte_unused)
{
	CPT_PMD_INIT_FUNC_TRACE();
}

static int
otx_cpt_que_pair_setup(struct rte_cryptodev *dev,
		       uint16_t que_pair_id,
		       const struct rte_cryptodev_qp_conf *qp_conf,
		       int socket_id __rte_unused)
{
	struct cpt_instance *instance = NULL;
	struct rte_pci_device *pci_dev;
	int ret = -1;

	CPT_PMD_INIT_FUNC_TRACE();

	if (dev->data->queue_pairs[que_pair_id] != NULL) {
		ret = otx_cpt_que_pair_release(dev, que_pair_id);
		if (ret)
			return ret;
	}

	/*
	 * The command queue length is fixed at DEFAULT_CMD_QLEN; a larger
	 * requested depth is only logged, not honoured.
	 */
	if (qp_conf->nb_descriptors > DEFAULT_CMD_QLEN) {
		CPT_LOG_INFO("Number of descriptors too large %d, using "
			     "default queue length of %d",
			     qp_conf->nb_descriptors, DEFAULT_CMD_QLEN);
	}

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	if (pci_dev->mem_resource[0].addr == NULL) {
		CPT_LOG_ERR("PCI mem address null");
		return -EIO;
	}

	ret = otx_cpt_get_resource(dev, 0, &instance, que_pair_id);
	if (ret != 0 || instance == NULL) {
		CPT_LOG_ERR("Error getting instance handle from device %s : "
			    "ret = %d", dev->data->name, ret);
		return ret;
	}

	instance->queue_id = que_pair_id;
	instance->sess_mp = qp_conf->mp_session;
	instance->sess_mp_priv = qp_conf->mp_session_private;
	dev->data->queue_pairs[que_pair_id] = instance;

	return 0;
}

static int
otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id)
{
	struct cpt_instance *instance = dev->data->queue_pairs[que_pair_id];
	int ret;

	CPT_PMD_INIT_FUNC_TRACE();

	ret = otx_cpt_put_resource(instance);
	if (ret != 0) {
		CPT_LOG_ERR("Error putting instance handle of device %s : "
			    "ret = %d", dev->data->name, ret);
		return ret;
	}

	dev->data->queue_pairs[que_pair_id] = NULL;

	return 0;
}

static unsigned int
otx_cpt_get_session_size(struct rte_cryptodev *dev __rte_unused)
{
	return cpt_get_session_size();
}

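/*
 * The hardware session context lives immediately after the cpt_sess_misc
 * header within the same mempool object, so its IOVA is the object's
 * IOVA plus sizeof(struct cpt_sess_misc).
 */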
static void
otx_cpt_session_init(void *sym_sess, uint8_t driver_id)
{
	struct rte_cryptodev_sym_session *sess = sym_sess;
	struct cpt_sess_misc *cpt_sess = (struct cpt_sess_misc *)
		get_sym_session_private_data(sess, driver_id);

	CPT_PMD_INIT_FUNC_TRACE();

	cpt_sess->ctx_dma_addr = rte_mempool_virt2iova(cpt_sess) +
			sizeof(struct cpt_sess_misc);
}

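/*
 * Configure a symmetric session: walk the xform chain and fill in the
 * private session data for each supported transform.
 */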
static int
otx_cpt_session_cfg(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    struct rte_cryptodev_sym_session *sess,
		    struct rte_mempool *mempool)
{
	struct rte_crypto_sym_xform *chain;
	void *sess_private_data = NULL;

	CPT_PMD_INIT_FUNC_TRACE();

	/* cpt_is_algo_supported() returns 0 when the whole chain is supported */
	if (cpt_is_algo_supported(xform))
		goto err;

	if (unlikely(sess == NULL)) {
		CPT_LOG_ERR("Invalid session struct");
		return -EINVAL;
	}

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CPT_LOG_ERR("Could not allocate sess_private_data");
		return -ENOMEM;
	}

	chain = xform;
	while (chain) {
		switch (chain->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			if (fill_sess_aead(chain, sess_private_data))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_CIPHER:
			if (fill_sess_cipher(chain, sess_private_data))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_AUTH:
			/* AES-GMAC takes a dedicated path, distinct from the
			 * other auth algos
			 */
			if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
				if (fill_sess_gmac(chain, sess_private_data))
					goto err;
			} else {
				if (fill_sess_auth(chain, sess_private_data))
					goto err;
			}
			break;
		default:
			CPT_LOG_ERR("Invalid crypto xform type");
			goto err;
		}
		chain = chain->next;
	}

	set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
	otx_cpt_session_init(sess, dev->driver_id);

	return 0;

err:
	if (sess_private_data)
		rte_mempool_put(mempool, sess_private_data);
	return -EPERM;
}

static void
otx_cpt_session_clear(struct rte_cryptodev *dev,
		      struct rte_cryptodev_sym_session *sess)
{
	void *sess_priv = get_sym_session_private_data(sess, dev->driver_id);
	struct rte_mempool *sess_mp;

	CPT_PMD_INIT_FUNC_TRACE();

	if (sess_priv) {
		memset(sess_priv, 0, otx_cpt_get_session_size(dev));
		sess_mp = rte_mempool_from_obj(sess_priv);
		set_sym_session_private_data(sess, dev->driver_id, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

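/*
 * Datapath. In-flight requests are tracked in a software pending queue,
 * a ring of DEFAULT_CMD_QLEN request ids, so completions can be matched
 * back to their crypto ops at dequeue time.
 */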
static __rte_always_inline int32_t __hot
otx_cpt_request_enqueue(struct cpt_instance *instance,
			struct pending_queue *pqueue,
			void *req)
{
	struct cpt_request_info *user_req = (struct cpt_request_info *)req;

	if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
		return -EAGAIN;

	fill_cpt_inst(instance, req);

	CPT_LOG_DP_DEBUG("req: %p op: %p ", req, user_req->op);

	/* Fill time_out cycles */
	user_req->time_out = rte_get_timer_cycles() +
			DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
	user_req->extra_time = 0;

	/* Default mode of software queue */
	mark_cpt_inst(instance);

	pqueue->rid_queue[pqueue->enq_tail].rid = (uintptr_t)user_req;

	/* We will use soft queue length here to limit requests */
	MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
	pqueue->pending_count += 1;

	CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
			 "op: %p", user_req, user_req->op);

	return 0;
}

static __rte_always_inline int __hot
otx_cpt_enq_single_sym(struct cpt_instance *instance,
		       struct rte_crypto_op *op,
		       struct pending_queue *pqueue)
{
	struct cpt_sess_misc *sess;
	struct rte_crypto_sym_op *sym_op = op->sym;
	void *prep_req, *mdata = NULL;
	int ret = 0;
	uint64_t cpt_op;

	sess = (struct cpt_sess_misc *)
			get_sym_session_private_data(sym_op->session,
						     otx_cryptodev_driver_id);

	cpt_op = sess->cpt_op;

	if (likely(cpt_op & CPT_OP_CIPHER_MASK))
		ret = fill_fc_params(op, sess, &instance->meta_info, &mdata,
				     &prep_req);
	else
		ret = fill_digest_params(op, sess, &instance->meta_info,
					 &mdata, &prep_req);

	if (unlikely(ret)) {
		CPT_LOG_DP_ERR("prep crypto req : op %p, cpt_op 0x%x "
			       "ret 0x%x", op, (unsigned int)cpt_op, ret);
		return ret;
	}

	/* Enqueue prepared instruction to h/w */
	ret = otx_cpt_request_enqueue(instance, pqueue, prep_req);
	if (unlikely(ret)) {
		/* Buffer allocated for request preparation needs to be freed */
		free_op_meta(mdata, instance->meta_info.pool);
		return ret;
	}

	return 0;
}

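/*
 * Sessionless path: a temporary session is carved out of the queue
 * pair's session mempools for this single op. The dequeue path (see
 * otx_cpt_pkt_dequeue) returns it to the mempools once the op completes.
 */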
static __rte_always_inline int __hot
otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
				struct rte_crypto_op *op,
				struct pending_queue *pqueue)
{
	struct cpt_sess_misc *sess;
	struct rte_crypto_sym_op *sym_op = op->sym;
	int ret;
	void *sess_t = NULL;
	void *sess_private_data_t = NULL;

	/* Create tmp session */
	if (rte_mempool_get(instance->sess_mp, (void **)&sess_t)) {
		ret = -ENOMEM;
		goto exit;
	}

	if (rte_mempool_get(instance->sess_mp_priv,
			(void **)&sess_private_data_t)) {
		ret = -ENOMEM;
		goto free_sess;
	}

	sess = (struct cpt_sess_misc *)sess_private_data_t;

	sess->ctx_dma_addr = rte_mempool_virt2iova(sess) +
			sizeof(struct cpt_sess_misc);

	ret = instance_session_cfg(sym_op->xform, (void *)sess);
	if (unlikely(ret)) {
		ret = -EINVAL;
		goto free_sess_priv;
	}

	/* Save tmp session in op */
	sym_op->session = (struct rte_cryptodev_sym_session *)sess_t;
	set_sym_session_private_data(sym_op->session, otx_cryptodev_driver_id,
				     sess_private_data_t);

	/* Enqueue op with the tmp session set */
	ret = otx_cpt_enq_single_sym(instance, op, pqueue);
	if (unlikely(ret))
		goto free_sess_priv;

	return 0;

free_sess_priv:
	rte_mempool_put(instance->sess_mp_priv, sess_private_data_t);
free_sess:
	rte_mempool_put(instance->sess_mp, sess_t);
exit:
	return ret;
}

static __rte_always_inline int __hot
otx_cpt_enq_single(struct cpt_instance *inst,
		   struct rte_crypto_op *op,
		   struct pending_queue *pqueue)
{
	/* Check for the type */
	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		return otx_cpt_enq_single_sym(inst, op, pqueue);
	else if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS))
		return otx_cpt_enq_single_sym_sessless(inst, op, pqueue);

	/* Should not reach here */
	return -EINVAL;
}

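/*
 * Burst enqueue: the burst is first clipped to the free space left in
 * the pending queue, and the doorbell is rung once for the whole batch
 * rather than per op.
 */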
static uint16_t
otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct cpt_instance *instance = (struct cpt_instance *)qptr;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct pending_queue *pqueue = &cptvf->pqueue;
	uint16_t count;
	int ret;

	count = DEFAULT_CMD_QLEN - pqueue->pending_count;
	if (nb_ops > count)
		nb_ops = count;

	count = 0;
	while (likely(count < nb_ops)) {
		/* Enqueue single op */
		ret = otx_cpt_enq_single(instance, ops[count], pqueue);
		if (unlikely(ret))
			break;
		count++;
	}

	otx_cpt_ring_dbell(instance, count);

	return count;
}

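/*
 * rsp[] is the response area prepared alongside each request: rsp[0]
 * holds the metabuf and rsp[1] the crypto op; when auth verification
 * must be finished in software, rsp[2]/rsp[3] carry the computed digest
 * and its length, as consumed by compl_auth_verify().
 */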
static __rte_always_inline void
otx_cpt_dequeue_post_process(struct rte_crypto_op *cop, uintptr_t *rsp)
{
	/* H/w has returned success */
	cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Perform further post processing */
	if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		/* Check if auth verify needs to be completed */
		if (unlikely(rsp[2]))
			compl_auth_verify(cop, (uint8_t *)rsp[2], rsp[3]);
		return;
	}
}

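/*
 * Burst dequeue runs in two passes: the first walks the pending queue
 * collecting completion codes until it hits a still-pending request; the
 * second translates each completion code into an op status, frees the
 * per-op metadata and releases any temporary sessionless session.
 */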
static uint16_t
otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct cpt_instance *instance = (struct cpt_instance *)qptr;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct pending_queue *pqueue = &cptvf->pqueue;
	struct cpt_request_info *user_req;
	struct rte_crypto_op *cop;
	struct rid *rid_e;
	uint8_t cc[nb_ops];
	int i, count, pcount;
	uint8_t ret;
	int nb_completed;
	void *metabuf;
	uintptr_t *rsp;

	pcount = pqueue->pending_count;
	count = (nb_ops > pcount) ? pcount : nb_ops;

	for (i = 0; i < count; i++) {
		rid_e = &pqueue->rid_queue[pqueue->deq_head];
		user_req = (struct cpt_request_info *)(rid_e->rid);

		if (likely((i + 1) < count))
			rte_prefetch_non_temporal((void *)rid_e[1].rid);

		ret = check_nb_command_id(user_req, instance);
		if (unlikely(ret == ERR_REQ_PENDING)) {
			/* Stop checking for completions */
			break;
		}

		/*
		 * Record the completion code; ops[i] temporarily holds the
		 * response area, from which the real crypto op is recovered
		 * in the second pass.
		 */
		cc[i] = ret;
		ops[i] = user_req->op;

		CPT_LOG_DP_DEBUG("Request %p Op %p completed with code %d",
				 user_req, user_req->op, ret);

		MOD_INC(pqueue->deq_head, DEFAULT_CMD_QLEN);
		pqueue->pending_count -= 1;
	}

	nb_completed = i;

	for (i = 0; i < nb_completed; i++) {
		rsp = (void *)ops[i];

		if (likely((i + 1) < nb_completed))
			rte_prefetch0(ops[i + 1]);

		metabuf = (void *)rsp[0];
		cop = (void *)rsp[1];

		ops[i] = cop;

		/* Check completion code */
		if (likely(cc[i] == 0)) {
			/* H/w success pkt. Post process */
			otx_cpt_dequeue_post_process(cop, rsp);
		} else if (cc[i] == ERR_GC_ICV_MISCOMPARE) {
			/* Auth data mismatch */
			cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			/* Error */
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}

		if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
			void *sess_private_data_t =
				get_sym_session_private_data(cop->sym->session,
						otx_cryptodev_driver_id);
			memset(sess_private_data_t, 0,
			       cpt_get_session_size());
			memset(cop->sym->session, 0,
			       rte_cryptodev_sym_get_existing_header_session_size(
				       cop->sym->session));
			rte_mempool_put(instance->sess_mp_priv,
					sess_private_data_t);
			rte_mempool_put(instance->sess_mp, cop->sym->session);
			cop->sym->session = NULL;
		}

		free_op_meta(metabuf, instance->meta_info.pool);
	}

	return nb_completed;
}

static struct rte_cryptodev_ops cptvf_ops = {
	/* Device related operations */
	.dev_configure = otx_cpt_dev_config,
	.dev_start = otx_cpt_dev_start,
	.dev_stop = otx_cpt_dev_stop,
	.dev_close = otx_cpt_dev_close,
	.dev_infos_get = otx_cpt_dev_info_get,

	.stats_get = otx_cpt_stats_get,
	.stats_reset = otx_cpt_stats_reset,
	.queue_pair_setup = otx_cpt_que_pair_setup,
	.queue_pair_release = otx_cpt_que_pair_release,
	.queue_pair_count = NULL,

	/* Crypto related operations */
	.sym_session_get_size = otx_cpt_get_session_size,
	.sym_session_configure = otx_cpt_session_cfg,
	.sym_session_clear = otx_cpt_session_clear
};

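/*
 * Probe-time setup of a CPT VF: map BAR0, initialise the hardware, start
 * the mailbox poll alarm and wire up the PMD ops and burst functions.
 */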
int
otx_cpt_dev_create(struct rte_cryptodev *c_dev)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(c_dev->device);
	struct cpt_vf *cptvf = NULL;
	void *reg_base;
	char dev_name[32];
	int ret;

	if (pdev->mem_resource[0].phys_addr == 0ULL)
		return -EIO;

	/* For secondary processes, we don't initialise any further as the
	 * primary has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	cptvf = rte_zmalloc_socket("otx_cryptodev_private_mem",
			sizeof(struct cpt_vf), RTE_CACHE_LINE_SIZE,
			rte_socket_id());
	if (cptvf == NULL) {
		CPT_LOG_ERR("Cannot allocate memory for device private data");
		return -ENOMEM;
	}

	snprintf(dev_name, sizeof(dev_name), "%02x:%02x.%x",
			pdev->addr.bus, pdev->addr.devid, pdev->addr.function);

	reg_base = pdev->mem_resource[0].addr;
	if (!reg_base) {
		CPT_LOG_ERR("Failed to map BAR0 of %s", dev_name);
		ret = -ENODEV;
		goto fail;
	}

	ret = otx_cpt_hw_init(cptvf, pdev, reg_base, dev_name);
	if (ret) {
		CPT_LOG_ERR("Failed to init cptvf %s", dev_name);
		ret = -EIO;
		goto fail;
	}

	/* Start off timer for mailbox interrupts */
	otx_cpt_periodic_alarm_start(cptvf);

	c_dev->dev_ops = &cptvf_ops;

	c_dev->enqueue_burst = otx_cpt_pkt_enqueue;
	c_dev->dequeue_burst = otx_cpt_pkt_dequeue;

	c_dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT;

	/* Save dev private data */
	c_dev->data->dev_private = cptvf;

	return 0;

fail:
	/* Free private data allocated; rte_free() tolerates NULL */
	rte_free(cptvf);

	return ret;
}