xref: /dpdk/drivers/compress/octeontx/otx_zip_pmd.c (revision c378f084d6e38b3bb2f838c32f01ed4a10d26c32)
143e610bbSSunila Sahu /* SPDX-License-Identifier: BSD-3-Clause
243e610bbSSunila Sahu  * Copyright(c) 2018 Cavium, Inc
343e610bbSSunila Sahu  */
443e610bbSSunila Sahu 
543e610bbSSunila Sahu #include <string.h>
643e610bbSSunila Sahu 
743e610bbSSunila Sahu #include <rte_byteorder.h>
843e610bbSSunila Sahu #include <rte_common.h>
943e610bbSSunila Sahu #include <rte_cpuflags.h>
1043e610bbSSunila Sahu #include <rte_malloc.h>
1143e610bbSSunila Sahu 
1243e610bbSSunila Sahu #include "otx_zip.h"
1343e610bbSSunila Sahu 
14*c378f084SAshish Gupta static const struct rte_compressdev_capabilities
15*c378f084SAshish Gupta 				octtx_zip_pmd_capabilities[] = {
16*c378f084SAshish Gupta 	{	.algo = RTE_COMP_ALGO_DEFLATE,
17*c378f084SAshish Gupta 		/* Deflate */
18*c378f084SAshish Gupta 		.comp_feature_flags =	RTE_COMP_FF_HUFFMAN_FIXED |
19*c378f084SAshish Gupta 					RTE_COMP_FF_HUFFMAN_DYNAMIC,
20*c378f084SAshish Gupta 		/* Non sharable Priv XFORM and Stateless */
21*c378f084SAshish Gupta 		.window_size = {
22*c378f084SAshish Gupta 				.min = 1,
23*c378f084SAshish Gupta 				.max = 14,
24*c378f084SAshish Gupta 				.increment = 1
25*c378f084SAshish Gupta 				/* size supported 2^1 to 2^14 */
26*c378f084SAshish Gupta 		},
27*c378f084SAshish Gupta 	},
28*c378f084SAshish Gupta 	RTE_COMP_END_OF_CAPABILITIES_LIST()
29*c378f084SAshish Gupta };
3043e610bbSSunila Sahu 
31*c378f084SAshish Gupta /** Configure device */
32*c378f084SAshish Gupta static int
33*c378f084SAshish Gupta zip_pmd_config(struct rte_compressdev *dev,
34*c378f084SAshish Gupta 		struct rte_compressdev_config *config)
35*c378f084SAshish Gupta {
36*c378f084SAshish Gupta 	int nb_streams;
37*c378f084SAshish Gupta 	char res_pool[RTE_MEMZONE_NAMESIZE];
38*c378f084SAshish Gupta 	struct zip_vf *vf;
39*c378f084SAshish Gupta 	struct rte_mempool *zip_buf_mp;
40*c378f084SAshish Gupta 
41*c378f084SAshish Gupta 	if (!config || !dev)
42*c378f084SAshish Gupta 		return -EIO;
43*c378f084SAshish Gupta 
44*c378f084SAshish Gupta 	vf = (struct zip_vf *)(dev->data->dev_private);
45*c378f084SAshish Gupta 
46*c378f084SAshish Gupta 	/* create pool with maximum numbers of resources
47*c378f084SAshish Gupta 	 * required by streams
48*c378f084SAshish Gupta 	 */
49*c378f084SAshish Gupta 
50*c378f084SAshish Gupta 	/* use common pool for non-shareable priv_xform and stream */
51*c378f084SAshish Gupta 	nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;
52*c378f084SAshish Gupta 
53*c378f084SAshish Gupta 	snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
54*c378f084SAshish Gupta 		 dev->data->dev_id);
55*c378f084SAshish Gupta 
56*c378f084SAshish Gupta 	/** TBD Should we use the per core object cache for stream resources */
57*c378f084SAshish Gupta 	zip_buf_mp = rte_mempool_create(
58*c378f084SAshish Gupta 			res_pool,
59*c378f084SAshish Gupta 			nb_streams * MAX_BUFS_PER_STREAM,
60*c378f084SAshish Gupta 			ZIP_BUF_SIZE,
61*c378f084SAshish Gupta 			0,
62*c378f084SAshish Gupta 			0,
63*c378f084SAshish Gupta 			NULL,
64*c378f084SAshish Gupta 			NULL,
65*c378f084SAshish Gupta 			NULL,
66*c378f084SAshish Gupta 			NULL,
67*c378f084SAshish Gupta 			SOCKET_ID_ANY,
68*c378f084SAshish Gupta 			0);
69*c378f084SAshish Gupta 
70*c378f084SAshish Gupta 	if (zip_buf_mp == NULL) {
71*c378f084SAshish Gupta 		ZIP_PMD_ERR(
72*c378f084SAshish Gupta 			"Failed to create buf mempool octtx_zip_res_pool%u",
73*c378f084SAshish Gupta 			dev->data->dev_id);
74*c378f084SAshish Gupta 		return -1;
75*c378f084SAshish Gupta 	}
76*c378f084SAshish Gupta 
77*c378f084SAshish Gupta 	vf->zip_mp = zip_buf_mp;
78*c378f084SAshish Gupta 
79*c378f084SAshish Gupta 	return 0;
80*c378f084SAshish Gupta }
81*c378f084SAshish Gupta 
82*c378f084SAshish Gupta /** Start device */
83*c378f084SAshish Gupta static int
84*c378f084SAshish Gupta zip_pmd_start(__rte_unused struct rte_compressdev *dev)
85*c378f084SAshish Gupta {
86*c378f084SAshish Gupta 	return 0;
87*c378f084SAshish Gupta }
88*c378f084SAshish Gupta 
89*c378f084SAshish Gupta /** Stop device */
90*c378f084SAshish Gupta static void
91*c378f084SAshish Gupta zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
92*c378f084SAshish Gupta {
93*c378f084SAshish Gupta 
94*c378f084SAshish Gupta }
95*c378f084SAshish Gupta 
96*c378f084SAshish Gupta /** Close device */
97*c378f084SAshish Gupta static int
98*c378f084SAshish Gupta zip_pmd_close(struct rte_compressdev *dev)
99*c378f084SAshish Gupta {
100*c378f084SAshish Gupta 	if (dev == NULL)
101*c378f084SAshish Gupta 		return -1;
102*c378f084SAshish Gupta 
103*c378f084SAshish Gupta 	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
104*c378f084SAshish Gupta 	rte_mempool_free(vf->zip_mp);
105*c378f084SAshish Gupta 
106*c378f084SAshish Gupta 	return 0;
107*c378f084SAshish Gupta }
108*c378f084SAshish Gupta 
109*c378f084SAshish Gupta /** Get device statistics */
110*c378f084SAshish Gupta static void
111*c378f084SAshish Gupta zip_pmd_stats_get(struct rte_compressdev *dev,
112*c378f084SAshish Gupta 		struct rte_compressdev_stats *stats)
113*c378f084SAshish Gupta {
114*c378f084SAshish Gupta 	int qp_id;
115*c378f084SAshish Gupta 
116*c378f084SAshish Gupta 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
117*c378f084SAshish Gupta 		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
118*c378f084SAshish Gupta 
119*c378f084SAshish Gupta 		stats->enqueued_count += qp->qp_stats.enqueued_count;
120*c378f084SAshish Gupta 		stats->dequeued_count += qp->qp_stats.dequeued_count;
121*c378f084SAshish Gupta 
122*c378f084SAshish Gupta 		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
123*c378f084SAshish Gupta 		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
124*c378f084SAshish Gupta 	}
125*c378f084SAshish Gupta }
126*c378f084SAshish Gupta 
127*c378f084SAshish Gupta /** Reset device statistics */
128*c378f084SAshish Gupta static void
129*c378f084SAshish Gupta zip_pmd_stats_reset(struct rte_compressdev *dev)
130*c378f084SAshish Gupta {
131*c378f084SAshish Gupta 	int qp_id;
132*c378f084SAshish Gupta 
133*c378f084SAshish Gupta 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
134*c378f084SAshish Gupta 		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
135*c378f084SAshish Gupta 		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
136*c378f084SAshish Gupta 	}
137*c378f084SAshish Gupta }
138*c378f084SAshish Gupta 
139*c378f084SAshish Gupta /** Get device info */
140*c378f084SAshish Gupta static void
141*c378f084SAshish Gupta zip_pmd_info_get(struct rte_compressdev *dev,
142*c378f084SAshish Gupta 		struct rte_compressdev_info *dev_info)
143*c378f084SAshish Gupta {
144*c378f084SAshish Gupta 	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
145*c378f084SAshish Gupta 
146*c378f084SAshish Gupta 	if (dev_info != NULL) {
147*c378f084SAshish Gupta 		dev_info->driver_name = dev->device->driver->name;
148*c378f084SAshish Gupta 		dev_info->feature_flags = dev->feature_flags;
149*c378f084SAshish Gupta 		dev_info->capabilities = octtx_zip_pmd_capabilities;
150*c378f084SAshish Gupta 		dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
151*c378f084SAshish Gupta 	}
152*c378f084SAshish Gupta }
153*c378f084SAshish Gupta 
154*c378f084SAshish Gupta /** Release queue pair */
155*c378f084SAshish Gupta static int
156*c378f084SAshish Gupta zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
157*c378f084SAshish Gupta {
158*c378f084SAshish Gupta 	struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
159*c378f084SAshish Gupta 
160*c378f084SAshish Gupta 	if (qp != NULL) {
161*c378f084SAshish Gupta 		zipvf_q_term(qp);
162*c378f084SAshish Gupta 
163*c378f084SAshish Gupta 		if (qp->processed_pkts)
164*c378f084SAshish Gupta 			rte_ring_free(qp->processed_pkts);
165*c378f084SAshish Gupta 
166*c378f084SAshish Gupta 		rte_free(qp);
167*c378f084SAshish Gupta 		dev->data->queue_pairs[qp_id] = NULL;
168*c378f084SAshish Gupta 	}
169*c378f084SAshish Gupta 	return 0;
170*c378f084SAshish Gupta }
171*c378f084SAshish Gupta 
172*c378f084SAshish Gupta /** Create a ring to place process packets on */
173*c378f084SAshish Gupta static struct rte_ring *
174*c378f084SAshish Gupta zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
175*c378f084SAshish Gupta 		unsigned int ring_size, int socket_id)
176*c378f084SAshish Gupta {
177*c378f084SAshish Gupta 	struct rte_ring *r;
178*c378f084SAshish Gupta 
179*c378f084SAshish Gupta 	r = rte_ring_lookup(qp->name);
180*c378f084SAshish Gupta 	if (r) {
181*c378f084SAshish Gupta 		if (rte_ring_get_size(r) >= ring_size) {
182*c378f084SAshish Gupta 			ZIP_PMD_INFO("Reusing existing ring %s for processed"
183*c378f084SAshish Gupta 					" packets", qp->name);
184*c378f084SAshish Gupta 			return r;
185*c378f084SAshish Gupta 		}
186*c378f084SAshish Gupta 
187*c378f084SAshish Gupta 		ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
188*c378f084SAshish Gupta 				" packets", qp->name);
189*c378f084SAshish Gupta 		return NULL;
190*c378f084SAshish Gupta 	}
191*c378f084SAshish Gupta 
192*c378f084SAshish Gupta 	return rte_ring_create(qp->name, ring_size, socket_id,
193*c378f084SAshish Gupta 						RING_F_EXACT_SZ);
194*c378f084SAshish Gupta }
195*c378f084SAshish Gupta 
196*c378f084SAshish Gupta /** Setup a queue pair */
197*c378f084SAshish Gupta static int
198*c378f084SAshish Gupta zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
199*c378f084SAshish Gupta 		uint32_t max_inflight_ops, int socket_id)
200*c378f084SAshish Gupta {
201*c378f084SAshish Gupta 	struct zipvf_qp *qp = NULL;
202*c378f084SAshish Gupta 	struct zip_vf *vf;
203*c378f084SAshish Gupta 	char *name;
204*c378f084SAshish Gupta 	int ret;
205*c378f084SAshish Gupta 
206*c378f084SAshish Gupta 	if (!dev)
207*c378f084SAshish Gupta 		return -1;
208*c378f084SAshish Gupta 
209*c378f084SAshish Gupta 	vf = (struct zip_vf *) (dev->data->dev_private);
210*c378f084SAshish Gupta 
211*c378f084SAshish Gupta 	/* Free memory prior to re-allocation if needed. */
212*c378f084SAshish Gupta 	if (dev->data->queue_pairs[qp_id] != NULL) {
213*c378f084SAshish Gupta 		ZIP_PMD_INFO("Using existing queue pair %d ", qp_id);
214*c378f084SAshish Gupta 		return 0;
215*c378f084SAshish Gupta 	}
216*c378f084SAshish Gupta 
217*c378f084SAshish Gupta 	name =  rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
218*c378f084SAshish Gupta 	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
219*c378f084SAshish Gupta 		 "zip_pmd_%u_qp_%u",
220*c378f084SAshish Gupta 		 dev->data->dev_id, qp_id);
221*c378f084SAshish Gupta 
222*c378f084SAshish Gupta 	/* Allocate the queue pair data structure. */
223*c378f084SAshish Gupta 	qp = rte_zmalloc_socket(name, sizeof(*qp),
224*c378f084SAshish Gupta 				RTE_CACHE_LINE_SIZE, socket_id);
225*c378f084SAshish Gupta 	if (qp == NULL)
226*c378f084SAshish Gupta 		return (-ENOMEM);
227*c378f084SAshish Gupta 
228*c378f084SAshish Gupta 	qp->name = name;
229*c378f084SAshish Gupta 
230*c378f084SAshish Gupta 	/* Create completion queue upto max_inflight_ops */
231*c378f084SAshish Gupta 	qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
232*c378f084SAshish Gupta 						max_inflight_ops, socket_id);
233*c378f084SAshish Gupta 	if (qp->processed_pkts == NULL)
234*c378f084SAshish Gupta 		goto qp_setup_cleanup;
235*c378f084SAshish Gupta 
236*c378f084SAshish Gupta 	qp->id = qp_id;
237*c378f084SAshish Gupta 	qp->vf = vf;
238*c378f084SAshish Gupta 
239*c378f084SAshish Gupta 	ret = zipvf_q_init(qp);
240*c378f084SAshish Gupta 	if (ret < 0)
241*c378f084SAshish Gupta 		goto qp_setup_cleanup;
242*c378f084SAshish Gupta 
243*c378f084SAshish Gupta 	dev->data->queue_pairs[qp_id] = qp;
244*c378f084SAshish Gupta 
245*c378f084SAshish Gupta 	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
246*c378f084SAshish Gupta 	return 0;
247*c378f084SAshish Gupta 
248*c378f084SAshish Gupta qp_setup_cleanup:
249*c378f084SAshish Gupta 	if (qp->processed_pkts)
250*c378f084SAshish Gupta 		rte_ring_free(qp->processed_pkts);
251*c378f084SAshish Gupta 	if (qp)
252*c378f084SAshish Gupta 		rte_free(qp);
253*c378f084SAshish Gupta 	return -1;
254*c378f084SAshish Gupta }
255*c378f084SAshish Gupta 
256*c378f084SAshish Gupta struct rte_compressdev_ops octtx_zip_pmd_ops = {
257*c378f084SAshish Gupta 		.dev_configure		= zip_pmd_config,
258*c378f084SAshish Gupta 		.dev_start		= zip_pmd_start,
259*c378f084SAshish Gupta 		.dev_stop		= zip_pmd_stop,
260*c378f084SAshish Gupta 		.dev_close		= zip_pmd_close,
261*c378f084SAshish Gupta 
262*c378f084SAshish Gupta 		.stats_get		= zip_pmd_stats_get,
263*c378f084SAshish Gupta 		.stats_reset		= zip_pmd_stats_reset,
264*c378f084SAshish Gupta 
265*c378f084SAshish Gupta 		.dev_infos_get		= zip_pmd_info_get,
266*c378f084SAshish Gupta 
267*c378f084SAshish Gupta 		.queue_pair_setup	= zip_pmd_qp_setup,
268*c378f084SAshish Gupta 		.queue_pair_release	= zip_pmd_qp_release,
26943e610bbSSunila Sahu };
27043e610bbSSunila Sahu 
27143e610bbSSunila Sahu static int
27243e610bbSSunila Sahu zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
27343e610bbSSunila Sahu 	struct rte_pci_device *pci_dev)
27443e610bbSSunila Sahu {
27543e610bbSSunila Sahu 	int ret = 0;
27643e610bbSSunila Sahu 	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
27743e610bbSSunila Sahu 	struct rte_compressdev *compressdev;
27843e610bbSSunila Sahu 	struct rte_compressdev_pmd_init_params init_params = {
27943e610bbSSunila Sahu 		"",
28043e610bbSSunila Sahu 		rte_socket_id(),
28143e610bbSSunila Sahu 	};
28243e610bbSSunila Sahu 
28343e610bbSSunila Sahu 	ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
28443e610bbSSunila Sahu 			(unsigned int)pci_dev->id.vendor_id,
28543e610bbSSunila Sahu 			(unsigned int)pci_dev->id.device_id);
28643e610bbSSunila Sahu 
28743e610bbSSunila Sahu 	rte_pci_device_name(&pci_dev->addr, compressdev_name,
28843e610bbSSunila Sahu 			    sizeof(compressdev_name));
28943e610bbSSunila Sahu 
29043e610bbSSunila Sahu 	compressdev = rte_compressdev_pmd_create(compressdev_name,
29143e610bbSSunila Sahu 		&pci_dev->device, sizeof(struct zip_vf), &init_params);
29243e610bbSSunila Sahu 	if (compressdev == NULL) {
29343e610bbSSunila Sahu 		ZIP_PMD_ERR("driver %s: create failed", init_params.name);
29443e610bbSSunila Sahu 		return -ENODEV;
29543e610bbSSunila Sahu 	}
29643e610bbSSunila Sahu 
29743e610bbSSunila Sahu 	/*
29843e610bbSSunila Sahu 	 * create only if proc_type is primary.
29943e610bbSSunila Sahu 	 */
30043e610bbSSunila Sahu 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
30143e610bbSSunila Sahu 		/*  create vf dev with given pmd dev id */
30243e610bbSSunila Sahu 		ret = zipvf_create(compressdev);
30343e610bbSSunila Sahu 		if (ret < 0) {
30443e610bbSSunila Sahu 			ZIP_PMD_ERR("Device creation failed");
30543e610bbSSunila Sahu 			rte_compressdev_pmd_destroy(compressdev);
30643e610bbSSunila Sahu 			return ret;
30743e610bbSSunila Sahu 		}
30843e610bbSSunila Sahu 	}
30943e610bbSSunila Sahu 
31043e610bbSSunila Sahu 	compressdev->dev_ops = &octtx_zip_pmd_ops;
31143e610bbSSunila Sahu 	/* register rx/tx burst functions for data path */
31243e610bbSSunila Sahu 	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
31343e610bbSSunila Sahu 	return ret;
31443e610bbSSunila Sahu }
31543e610bbSSunila Sahu 
31643e610bbSSunila Sahu static int
31743e610bbSSunila Sahu zip_pci_remove(struct rte_pci_device *pci_dev)
31843e610bbSSunila Sahu {
31943e610bbSSunila Sahu 	struct rte_compressdev *compressdev;
32043e610bbSSunila Sahu 	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
32143e610bbSSunila Sahu 
32243e610bbSSunila Sahu 	if (pci_dev == NULL) {
32343e610bbSSunila Sahu 		ZIP_PMD_ERR(" Invalid PCI Device\n");
32443e610bbSSunila Sahu 		return -EINVAL;
32543e610bbSSunila Sahu 	}
32643e610bbSSunila Sahu 	rte_pci_device_name(&pci_dev->addr, compressdev_name,
32743e610bbSSunila Sahu 			sizeof(compressdev_name));
32843e610bbSSunila Sahu 
32943e610bbSSunila Sahu 	compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
33043e610bbSSunila Sahu 	if (compressdev == NULL)
33143e610bbSSunila Sahu 		return -ENODEV;
33243e610bbSSunila Sahu 
33343e610bbSSunila Sahu 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
33443e610bbSSunila Sahu 		if (zipvf_destroy(compressdev) < 0)
33543e610bbSSunila Sahu 			return -ENODEV;
33643e610bbSSunila Sahu 	}
33743e610bbSSunila Sahu 	return rte_compressdev_pmd_destroy(compressdev);
33843e610bbSSunila Sahu }
33943e610bbSSunila Sahu 
34043e610bbSSunila Sahu static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
34143e610bbSSunila Sahu 	{
34243e610bbSSunila Sahu 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
34343e610bbSSunila Sahu 			PCI_DEVICE_ID_OCTEONTX_ZIPVF),
34443e610bbSSunila Sahu 	},
34543e610bbSSunila Sahu 	{
34643e610bbSSunila Sahu 		.device_id = 0
34743e610bbSSunila Sahu 	},
34843e610bbSSunila Sahu };
34943e610bbSSunila Sahu 
35043e610bbSSunila Sahu /**
35143e610bbSSunila Sahu  * Structure that represents a PCI driver
35243e610bbSSunila Sahu  */
35343e610bbSSunila Sahu static struct rte_pci_driver octtx_zip_pmd = {
35443e610bbSSunila Sahu 	.id_table    = pci_id_octtx_zipvf_table,
35543e610bbSSunila Sahu 	.drv_flags   = RTE_PCI_DRV_NEED_MAPPING,
35643e610bbSSunila Sahu 	.probe       = zip_pci_probe,
35743e610bbSSunila Sahu 	.remove      = zip_pci_remove,
35843e610bbSSunila Sahu };
35943e610bbSSunila Sahu 
/* Register the driver and its PCI ID table with the EAL. */
RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
36243e610bbSSunila Sahu 
/* Constructor: runs at shared-object load time via RTE_INIT. */
RTE_INIT(octtx_zip_init_log);

/* Register the PMD's log type and default its level to INFO.
 * On registration failure rte_log_register() returns a negative value,
 * in which case the level is left untouched.
 */
static void
octtx_zip_init_log(void)
{
	octtx_zip_logtype_driver = rte_log_register("pmd.compress.octeontx");
	if (octtx_zip_logtype_driver >= 0)
		rte_log_set_level(octtx_zip_logtype_driver, RTE_LOG_INFO);
}
372