/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cpuflags.h>
#include <rte_malloc.h>

#include "otx_zip.h"

static const struct rte_compressdev_capabilities
				octtx_zip_pmd_capabilities[] = {
	{	.algo = RTE_COMP_ALGO_DEFLATE,
		/* Deflate */
		.comp_feature_flags =	RTE_COMP_FF_HUFFMAN_FIXED |
					RTE_COMP_FF_HUFFMAN_DYNAMIC,
		/* Non-shareable priv_xform and stateless only */
		.window_size = {
				.min = 1,
				.max = 14,
				.increment = 1
				/* size supported 2^1 to 2^14 */
		},
	},
	RTE_COMP_END_OF_CAPABILITIES_LIST()
};

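/*
 * Each stream borrows MAX_BUFS_PER_STREAM fixed-size buffers from the
 * per-VF mempool created in zip_pmd_config(): bufs[CMD_BUF] holds the
 * pre-built hardware instruction (union zip_inst_s) and bufs[RES_BUF]
 * holds the result structure whose IOVA is programmed into that
 * instruction.
 */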
/** Parse xform parameters and set up a stream */
static int
zip_set_stream_parameters(struct rte_compressdev *dev,
			const struct rte_comp_xform *xform,
			struct zip_stream *z_stream)
{
	int ret;
	union zip_inst_s *inst;
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
	void *res;

	/* Allocate resources required by a stream */
	ret = rte_mempool_get_bulk(vf->zip_mp,
			z_stream->bufs, MAX_BUFS_PER_STREAM);
	if (ret < 0)
		return -1;

	/* get one command buffer from pool and set up */
	inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
	res = z_stream->bufs[RES_BUF];

	memset(inst->u, 0, sizeof(inst->u));

	/* set BF (beginning of file) only for the first op of the stream */
	inst->s.bf = 1;

	if (xform->type == RTE_COMP_COMPRESS) {
		inst->s.op = ZIP_OP_E_COMP;

		switch (xform->compress.deflate.huffman) {
		case RTE_COMP_HUFFMAN_DEFAULT:
			inst->s.cc = ZIP_CC_DEFAULT;
			break;
		case RTE_COMP_HUFFMAN_FIXED:
			inst->s.cc = ZIP_CC_FIXED_HUFF;
			break;
		case RTE_COMP_HUFFMAN_DYNAMIC:
			inst->s.cc = ZIP_CC_DYN_HUFF;
			break;
		default:
			ret = -1;
			goto err;
		}

		switch (xform->compress.level) {
		case RTE_COMP_LEVEL_MIN:
			inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
			break;
		case RTE_COMP_LEVEL_MAX:
			inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
			break;
		case RTE_COMP_LEVEL_NONE:
			ZIP_PMD_ERR("Compression level not supported");
			ret = -1;
			goto err;
		default:
			/* For any level between min and max, choose the
			 * PMD default.
			 */
			inst->s.ss = ZIP_COMP_E_LEVEL_MED;
			break;
		}
	} else if (xform->type == RTE_COMP_DECOMPRESS) {
		inst->s.op = ZIP_OP_E_DECOMP;
		/* from HRM:
		 * For DEFLATE decompression, [CC] must be 0x0.
		 * For decompression, [SS] must be 0x0.
		 */
		inst->s.cc = 0;
		/* Speed bit should not be set for decompression */
		inst->s.ss = 0;
		/* A decompression context is needed only for STATEFUL
		 * operations. Only STATELESS is currently supported, so
		 * skip setting the ctx pointer.
		 */

	} else {
		ZIP_PMD_ERR("xform type not supported");
		ret = -1;
		goto err;
	}

	inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
	inst->s.res_ptr_ctl.s.length = 0;

	z_stream->inst = inst;

	return 0;

err:
	rte_mempool_put_bulk(vf->zip_mp,
			     (void *)&(z_stream->bufs[0]),
			     MAX_BUFS_PER_STREAM);

	return ret;
}

/** Configure device */
static int
zip_pmd_config(struct rte_compressdev *dev,
		struct rte_compressdev_config *config)
{
	int nb_streams;
	char res_pool[RTE_MEMZONE_NAMESIZE];
	struct zip_vf *vf;
	struct rte_mempool *zip_buf_mp;

	if (!config || !dev)
		return -EIO;

	vf = (struct zip_vf *)(dev->data->dev_private);

	/* Create a pool with the maximum number of resources
	 * required by streams.
	 */

	/* use common pool for non-shareable priv_xform and stream */
	nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;

	snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
		 dev->data->dev_id);

	/** TBD Should we use the per core object cache for stream resources */
	zip_buf_mp = rte_mempool_create(
			res_pool,
			nb_streams * MAX_BUFS_PER_STREAM,
			ZIP_BUF_SIZE,
			0,
			0,
			NULL,
			NULL,
			NULL,
			NULL,
			SOCKET_ID_ANY,
			0);

	if (zip_buf_mp == NULL) {
		ZIP_PMD_ERR(
			"Failed to create buf mempool octtx_zip_res_pool%u",
			dev->data->dev_id);
		return -1;
	}

	vf->zip_mp = zip_buf_mp;

	return 0;
}


/** Start device */
static int
zip_pmd_start(__rte_unused struct rte_compressdev *dev)
{
	return 0;
}

/** Stop device */
static void
zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
{

}

/** Close device */
static int
zip_pmd_close(struct rte_compressdev *dev)
{
	struct zip_vf *vf;

	if (dev == NULL)
		return -1;

	vf = (struct zip_vf *)dev->data->dev_private;
	rte_mempool_free(vf->zip_mp);

	return 0;
}

/** Get device statistics */
static void
zip_pmd_stats_get(struct rte_compressdev *dev,
		struct rte_compressdev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
zip_pmd_stats_reset(struct rte_compressdev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}

/** Get device info */
static void
zip_pmd_info_get(struct rte_compressdev *dev,
		struct rte_compressdev_info *dev_info)
{
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_name = dev->device->driver->name;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = octtx_zip_pmd_capabilities;
		dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
	}
}

/** Release queue pair */
static int
zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
	struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp != NULL) {
		zipvf_q_term(qp);

		if (qp->processed_pkts)
			rte_ring_free(qp->processed_pkts);

		rte_free(qp->name);
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}

/** Create a ring to place processed packets on */
static struct rte_ring *
zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			ZIP_PMD_INFO("Reusing existing ring %s for processed"
					" packets", qp->name);
			return r;
		}

		ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
				" packets", qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
						RING_F_EXACT_SZ);
}

/** Set up a queue pair */
static int
zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct zipvf_qp *qp = NULL;
	struct zip_vf *vf;
	char *name;
	int ret;

	if (!dev)
		return -1;

	vf = (struct zip_vf *)(dev->data->dev_private);

	/* Reuse the queue pair if it has already been set up. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		ZIP_PMD_INFO("Using existing queue pair %d", qp_id);
		return 0;
	}

	name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
	if (name == NULL)
		return (-ENOMEM);
	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
		 "zip_pmd_%u_qp_%u",
		 dev->data->dev_id, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket(name, sizeof(*qp),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL) {
		rte_free(name);
		return (-ENOMEM);
	}

	qp->name = name;

	/* Create completion queue up to max_inflight_ops */
	qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
						max_inflight_ops, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	qp->id = qp_id;
	qp->vf = vf;

	ret = zipvf_q_init(qp);
	if (ret < 0)
		goto qp_setup_cleanup;

	dev->data->queue_pairs[qp_id] = qp;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	return 0;

qp_setup_cleanup:
	if (qp->processed_pkts)
		rte_ring_free(qp->processed_pkts);
	rte_free(qp->name);
	rte_free(qp);
	return -1;
}

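/** Create a stream (registered as the non-shareable private_xform create op) */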
static int
zip_pmd_stream_create(struct rte_compressdev *dev,
		const struct rte_comp_xform *xform, void **stream)
{
	int ret;
	struct zip_stream *strm = NULL;

	strm = rte_malloc(NULL,
			sizeof(struct zip_stream), 0);

	if (strm == NULL)
		return (-ENOMEM);

	ret = zip_set_stream_parameters(dev, xform, strm);
	if (ret < 0) {
		ZIP_PMD_ERR("failed to configure xform parameters");
		rte_free(strm);
		return ret;
	}
	*stream = strm;
	return 0;
}

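/** Free a stream and return its buffers to the resource pool */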
static int
zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
{
	struct zip_vf *vf = (struct zip_vf *)(dev->data->dev_private);
	struct zip_stream *z_stream;

	if (stream == NULL)
		return 0;

	z_stream = (struct zip_stream *)stream;

	/* Free resources back to pool */
	rte_mempool_put_bulk(vf->zip_mp,
				(void *)&(z_stream->bufs[0]),
				MAX_BUFS_PER_STREAM);

	/* Zero out the whole structure */
	memset(stream, 0, sizeof(struct zip_stream));
	rte_free(stream);

	return 0;
}

struct rte_compressdev_ops octtx_zip_pmd_ops = {
		.dev_configure		= zip_pmd_config,
		.dev_start		= zip_pmd_start,
		.dev_stop		= zip_pmd_stop,
		.dev_close		= zip_pmd_close,

		.stats_get		= zip_pmd_stats_get,
		.stats_reset		= zip_pmd_stats_reset,

		.dev_infos_get		= zip_pmd_info_get,

		.queue_pair_setup	= zip_pmd_qp_setup,
		.queue_pair_release	= zip_pmd_qp_release,

		.private_xform_create	= zip_pmd_stream_create,
		.private_xform_free	= zip_pmd_stream_free,
		.stream_create		= NULL,
		.stream_free		= NULL
};
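
/*
 * A minimal usage sketch, not part of this driver: it assumes compress
 * device id 0, a single queue pair, and an already populated DEFLATE
 * xform 'xform'. It shows how an application reaches the ops above
 * through the generic compressdev API:
 *
 *	struct rte_compressdev_config cfg = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *		.max_nb_priv_xforms = 1,
 *		.max_nb_streams = 0,
 *	};
 *	void *priv_xform;
 *
 *	rte_compressdev_configure(0, &cfg);          // -> zip_pmd_config
 *	rte_compressdev_queue_pair_setup(0, 0, 64,
 *			rte_socket_id());            // -> zip_pmd_qp_setup
 *	rte_compressdev_start(0);                    // -> zip_pmd_start
 *	rte_compressdev_private_xform_create(0, &xform,
 *			&priv_xform);                // -> zip_pmd_stream_create
 */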
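/** Probe a ZIP VF PCI device and register it as a compressdev */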
static int
zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	int ret = 0;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	struct rte_compressdev *compressdev;
	struct rte_compressdev_pmd_init_params init_params = {
		"",
		rte_socket_id(),
	};

	ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
			(unsigned int)pci_dev->id.vendor_id,
			(unsigned int)pci_dev->id.device_id);

	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			    sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_create(compressdev_name,
		&pci_dev->device, sizeof(struct zip_vf), &init_params);
	if (compressdev == NULL) {
		ZIP_PMD_ERR("driver %s: create failed", init_params.name);
		return -ENODEV;
	}

	/*
	 * create only if proc_type is primary.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* create vf dev with given pmd dev id */
		ret = zipvf_create(compressdev);
		if (ret < 0) {
			ZIP_PMD_ERR("Device creation failed");
			rte_compressdev_pmd_destroy(compressdev);
			return ret;
		}
	}

	compressdev->dev_ops = &octtx_zip_pmd_ops;
	/* advertise the device as hardware accelerated */
	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
	return ret;
}

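/** Remove the compressdev registered for a ZIP VF PCI device */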
static int
zip_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_compressdev *compressdev;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];

	if (pci_dev == NULL) {
		ZIP_PMD_ERR("Invalid PCI device");
		return -EINVAL;
	}
	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
	if (compressdev == NULL)
		return -ENODEV;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (zipvf_destroy(compressdev) < 0)
			return -ENODEV;
	}
	return rte_compressdev_pmd_destroy(compressdev);
}

static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			PCI_DEVICE_ID_OCTEONTX_ZIPVF),
	},
	{
		.device_id = 0
	},
};

/**
 * Structure that represents a PCI driver
 */
static struct rte_pci_driver octtx_zip_pmd = {
	.id_table    = pci_id_octtx_zipvf_table,
	.drv_flags   = RTE_PCI_DRV_NEED_MAPPING,
	.probe       = zip_pci_probe,
	.remove      = zip_pci_remove,
};

RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);

RTE_INIT(octtx_zip_init_log);

static void
octtx_zip_init_log(void)
{
	octtx_zip_logtype_driver = rte_log_register("pmd.compress.octeontx");
	if (octtx_zip_logtype_driver >= 0)
		rte_log_set_level(octtx_zip_logtype_driver, RTE_LOG_INFO);
}