/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cpuflags.h>
#include <rte_malloc.h>

#include "otx_zip.h"

static const struct rte_compressdev_capabilities
				octtx_zip_pmd_capabilities[] = {
	{	.algo = RTE_COMP_ALGO_DEFLATE,
		/* Deflate */
		.comp_feature_flags =	RTE_COMP_FF_HUFFMAN_FIXED |
					RTE_COMP_FF_HUFFMAN_DYNAMIC |
					RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
					RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
					RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
		/* Non sharable Priv XFORM and Stateless */
		.window_size = {
				.min = 1,
				.max = 14,
				.increment = 1
				/* size supported 2^1 to 2^14 */
		},
	},
	RTE_COMP_END_OF_CAPABILITIES_LIST()
};

/*
 * Reset stream to default state for the next set of stateless operations
 */
static inline void
reset_stream(union zip_inst_s *inst)
{
	inst->s.bf = 1;
	inst->s.ef = 0;
}

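/** Build the ZIP instruction for one stateless op, allocating SG info for
 * segmented mbufs, and submit it to the hardware command queue.
 */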
int
zip_process_op(struct rte_comp_op *op,
		struct zipvf_qp *qp,
		struct zip_stream *zstrm, int num)
{
	union zip_inst_s *inst = zstrm->inst[num];
	volatile union zip_zres_s *zresult = NULL;

	/* Gather info object for a segmented source mbuf */
	if (op->m_src->nb_segs > 1)
		if (rte_mempool_get(qp->vf->sg_mp, (void *)&qp->g_info) < 0) {
			ZIP_PMD_ERR("Can't allocate object from SG pool");
			return (-ENOMEM);
		}

	/* Scatter info object for a segmented destination mbuf */
	if (op->m_dst->nb_segs > 1)
		if (rte_mempool_get(qp->vf->sg_mp, (void *)&qp->s_info) < 0) {
			rte_mempool_put(qp->vf->sg_mp, qp->g_info);
			ZIP_PMD_ERR("Can't allocate object from SG pool");
			return (-ENOMEM);
		}

	if (zipvf_prepare_cmd_stateless(op, qp, inst)) {
		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		rte_mempool_put(qp->vf->sg_mp, qp->g_info);
		rte_mempool_put(qp->vf->sg_mp, qp->s_info);

		ZIP_PMD_ERR("Can't fill SGL buffers");
		return -EINVAL;
	}

	zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF + num];
	zresult->s.compcode = 0;

#ifdef ZIP_DBG
	zip_dump_instruction(inst);
#endif

	/* Submit zip command */
	zipvf_push_command(qp, (void *)inst);

	return 0;
}

/** Parse xform parameters and set up a stream */
static int
zip_set_stream_parameters(struct rte_compressdev *dev,
			const struct rte_comp_xform *xform,
			struct zip_stream *z_stream)
{
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
	union zip_inst_s *inst;
	void *res;
	int ret, i;

	/* Allocate resources required by a stream */
	ret = rte_mempool_get_bulk(vf->zip_mp,
			z_stream->bufs, (MAX_BUFS_PER_STREAM * ZIP_BURST_SIZE));
	if (ret < 0)
		return -1;

	for (i = 0; i < ZIP_BURST_SIZE; i++) {
		/* get one command buffer from pool and set up */
		inst = (union zip_inst_s *)z_stream->bufs[(CMD_BUF * ZIP_BURST_SIZE) + i];
		res = z_stream->bufs[(RES_BUF * ZIP_BURST_SIZE) + i];
		memset(inst->u, 0, sizeof(inst->u));

		/* set bf only for the first op of a stream */
		inst->s.bf = 1;

		if (xform->type == RTE_COMP_COMPRESS) {
			inst->s.op = ZIP_OP_E_COMP;

			switch (xform->compress.deflate.huffman) {
			case RTE_COMP_HUFFMAN_DEFAULT:
				inst->s.cc = ZIP_CC_DEFAULT;
				break;
			case RTE_COMP_HUFFMAN_FIXED:
				inst->s.cc = ZIP_CC_FIXED_HUFF;
				break;
			case RTE_COMP_HUFFMAN_DYNAMIC:
				inst->s.cc = ZIP_CC_DYN_HUFF;
				break;
			default:
				ret = -1;
				goto err;
			}

			switch (xform->compress.level) {
			case RTE_COMP_LEVEL_MIN:
				inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
				break;
			case RTE_COMP_LEVEL_MAX:
				inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
				break;
			case RTE_COMP_LEVEL_NONE:
				ZIP_PMD_ERR("Compression level not supported");
				ret = -1;
				goto err;
			default:
				/* for any value between min and max, choose
				 * PMD default.
				 */
				inst->s.ss = ZIP_COMP_E_LEVEL_MED; /* PMD default */
				break;
			}
		} else if (xform->type == RTE_COMP_DECOMPRESS) {
			inst->s.op = ZIP_OP_E_DECOMP;
			/* from HRM,
			 * For DEFLATE decompression, [CC] must be 0x0.
			 * For decompression, [SS] must be 0x0
			 */
			inst->s.cc = 0;
			/* Speed bit should not be set for decompression */
			inst->s.ss = 0;
			/* decompression context is supported only for STATEFUL
			 * operations. Currently we support STATELESS ONLY so
			 * skip setting of ctx pointer
			 */

		} else {
			ZIP_PMD_ERR("xform type not supported");
			ret = -1;
			goto err;
		}

		inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
		inst->s.res_ptr_ctl.s.length = 0;

		z_stream->inst[i] = inst;
	}

	z_stream->func = zip_process_op;

	return 0;

err:
	rte_mempool_put_bulk(vf->zip_mp,
			     (void *)&(z_stream->bufs[0]),
			     (MAX_BUFS_PER_STREAM * ZIP_BURST_SIZE));

	return ret;
}

/** Configure device */
static int
zip_pmd_config(struct rte_compressdev *dev,
		struct rte_compressdev_config *config)
{
	char res_pool[RTE_MEMZONE_NAMESIZE];
	char sg_pool[RTE_MEMZONE_NAMESIZE];
	struct rte_mempool *zip_buf_mp;
	struct rte_mempool *zip_sg_mp;
	struct zip_vf *vf;
	int nb_streams;

	if (!config || !dev)
		return -EIO;

	vf = (struct zip_vf *)(dev->data->dev_private);

	/* create pool with the maximum number of resources
	 * required by streams
	 */

	/* use common pool for non-shareable priv_xform and stream */
	nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;

	snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
		 dev->data->dev_id);

	snprintf(sg_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_sg_pool%u",
		 dev->data->dev_id);

	/** TBD Should we use the per core object cache for stream resources */
	zip_buf_mp = rte_mempool_create(
			res_pool,
			(nb_streams * MAX_BUFS_PER_STREAM * ZIP_BURST_SIZE),
			ZIP_BUF_SIZE,
			0,
			0,
			NULL,
			NULL,
			NULL,
			NULL,
			SOCKET_ID_ANY,
			0);

	if (zip_buf_mp == NULL) {
		ZIP_PMD_ERR(
			"Failed to create buf mempool octtx_zip_res_pool%u",
			dev->data->dev_id);
		return -1;
	}

	/* Scatter gather buffer pool */
	zip_sg_mp = rte_mempool_create(
			sg_pool,
			(2 * nb_streams * ZIP_BURST_SIZE * ZIP_MAX_SEGS),
			ZIP_SGBUF_SIZE,
			0,
			0,
			NULL,
			NULL,
			NULL,
			NULL,
			SOCKET_ID_ANY,
			MEMPOOL_F_NO_SPREAD);

	if (zip_sg_mp == NULL) {
		ZIP_PMD_ERR("Failed to create SG buf mempool octtx_zip_sg_pool%u",
			    dev->data->dev_id);

		/* Free the buffer pool created above */
		rte_mempool_free(zip_buf_mp);
		return -1;
	}

	vf->zip_mp = zip_buf_mp;
	vf->sg_mp = zip_sg_mp;

	return 0;
}

/** Start device */
static int
zip_pmd_start(__rte_unused struct rte_compressdev *dev)
{
	return 0;
}

/** Stop device */
static void
zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
{

}

/** Close device */
static int
zip_pmd_close(struct rte_compressdev *dev)
{
	if (dev == NULL)
		return -1;

	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
	rte_mempool_free(vf->zip_mp);
	rte_mempool_free(vf->sg_mp);

	return 0;
}

/** Get device statistics */
static void
zip_pmd_stats_get(struct rte_compressdev *dev,
		struct rte_compressdev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
zip_pmd_stats_reset(struct rte_compressdev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}

/** Get device info */
static void
zip_pmd_info_get(struct rte_compressdev *dev,
		struct rte_compressdev_info *dev_info)
{
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_name = dev->device->driver->name;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = octtx_zip_pmd_capabilities;
		dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
	}
}

/** Release queue pair */
static int
zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
	struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp != NULL) {
		zipvf_q_term(qp);

		rte_ring_free(qp->processed_pkts);

		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}

/** Create a ring to place processed packets on */
static struct rte_ring *
zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			ZIP_PMD_INFO("Reusing existing ring %s for processed"
					" packets", qp->name);
			return r;
		}

		ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
				" packets", qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
						RING_F_EXACT_SZ);
}

/** Setup a queue pair */
static int
zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct zipvf_qp *qp = NULL;
	struct zip_vf *vf;
	char *name;
	int ret;

	if (!dev)
		return -1;

	vf = (struct zip_vf *) (dev->data->dev_private);

	/* Reuse the existing queue pair if it is already set up. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		ZIP_PMD_INFO("Using existing queue pair %d", qp_id);
		return 0;
	}

	name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
	if (name == NULL)
		return (-ENOMEM);
	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
		 "zip_pmd_%u_qp_%u",
		 dev->data->dev_id, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket(name, sizeof(*qp),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL) {
		rte_free(name);
		return (-ENOMEM);
	}

	qp->name = name;

	/* Create completion queue up to max_inflight_ops */
	qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
						max_inflight_ops, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	qp->id = qp_id;
	qp->vf = vf;

	ret = zipvf_q_init(qp);
	if (ret < 0)
		goto qp_setup_cleanup;

	dev->data->queue_pairs[qp_id] = qp;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	return 0;

qp_setup_cleanup:
	rte_ring_free(qp->processed_pkts);
	rte_free(qp);
	return -1;
}

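/** Create a private xform (stream resources) for stateless operations */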
static int
zip_pmd_stream_create(struct rte_compressdev *dev,
		const struct rte_comp_xform *xform, void **stream)
{
	int ret;
	struct zip_stream *strm = NULL;

	strm = rte_malloc(NULL,
			sizeof(struct zip_stream), 0);

	if (strm == NULL)
		return (-ENOMEM);

	ret = zip_set_stream_parameters(dev, xform, strm);
	if (ret < 0) {
		ZIP_PMD_ERR("Failed to configure xform parameters");
		rte_free(strm);
		return ret;
	}
	*stream = strm;

	return 0;
}

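/** Free a private xform and return its stream buffers to the pool */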
static int
zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
{
	struct zip_vf *vf = (struct zip_vf *) (dev->data->dev_private);
	struct zip_stream *z_stream;

	if (stream == NULL)
		return 0;

	z_stream = (struct zip_stream *)stream;

	/* Free resources back to pool */
	rte_mempool_put_bulk(vf->zip_mp,
				(void *)&(z_stream->bufs[0]),
				(MAX_BUFS_PER_STREAM * ZIP_BURST_SIZE));

	/* Zero out the whole structure */
	memset(stream, 0, sizeof(struct zip_stream));
	rte_free(stream);

	return 0;
}

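/** Enqueue a burst of stateless ops and post each one, with its status,
 * to the queue pair's completion ring.
 */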
static uint16_t
zip_pmd_enqueue_burst(void *queue_pair,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct zipvf_qp *qp = queue_pair;
	struct zip_stream *zstrm;
	struct rte_comp_op *op;
	int i, ret = 0;
	uint16_t enqd = 0;

	for (i = 0; i < nb_ops; i++) {
		op = ops[i];

		if (op->op_type == RTE_COMP_OP_STATEFUL) {
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		} else {
			/* process stateless ops */
			zstrm = (struct zip_stream *)op->private_xform;
			if (unlikely(zstrm == NULL))
				op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			else
				ret = zstrm->func(op, qp, zstrm, i);
		}

		/* Whatever the outcome, put the op on the completion queue
		 * with its status
		 */
		if (!ret)
			ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);

		if (unlikely(ret < 0)) {
			/* increment count if failed to enqueue op */
			qp->qp_stats.enqueue_err_count++;
		} else {
			qp->qp_stats.enqueued_count++;
			enqd++;
		}
	}

	qp->enqed = enqd;
	ZIP_PMD_LOG(DEBUG, "ops_enqd[nb_ops:%d]:%d", nb_ops, enqd);

	return enqd;
}

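/** Dequeue completed ops: poll each result buffer for a completion code,
 * translate it to an op status and update the byte counters.
 */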
static uint16_t
zip_pmd_dequeue_burst(void *queue_pair,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	volatile union zip_zres_s *zresult = NULL;
	struct zipvf_qp *qp = queue_pair;

	unsigned int nb_dequeued = 0;
	struct zip_stream *zstrm;
	struct rte_comp_op *op;
	unsigned int i;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
				(void **)ops, nb_ops, NULL);
	qp->qp_stats.dequeued_count += nb_dequeued;

	/* Dequeue all the submitted ops */
	for (i = 0; i < nb_dequeued; i++) {
		op = ops[i];
		/* process stateless ops */
		zstrm = (struct zip_stream *)op->private_xform;
		zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF + i];

		/* Check and process results in sync mode */
		do {
		} while (!zresult->s.compcode);

		if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
			op->status = RTE_COMP_OP_STATUS_SUCCESS;
		} else {
			/* Fatal error, cannot do anything */
			ZIP_PMD_ERR("operation failed with error code:%d",
				zresult->s.compcode);
			if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
				op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
			else
				op->status = RTE_COMP_OP_STATUS_ERROR;
		}

		ZIP_PMD_LOG(DEBUG, "written %d", zresult->s.totalbyteswritten);

		/* Update op stats */
		switch (op->status) {
		case RTE_COMP_OP_STATUS_SUCCESS:
			op->consumed = zresult->s.totalbytesread;
		/* Fall-through */
		case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
			op->produced = zresult->s.totalbyteswritten;
			break;
		default:
			ZIP_PMD_ERR("stats not updated for status:%d",
				    op->status);
			break;
		}

		/* zstream is reset irrespective of result */
		reset_stream(zstrm->inst[i]);
		zresult->s.compcode = ZIP_COMP_E_NOTDONE;

		/* Return SG info objects for segmented mbufs */
		if (op->m_src->nb_segs > 1)
			rte_mempool_put(qp->vf->sg_mp, qp->g_info);

		if (op->m_dst->nb_segs > 1)
			rte_mempool_put(qp->vf->sg_mp, qp->s_info);
	}

	ZIP_PMD_LOG(DEBUG, "ops_deqd[nb_ops:%d]: %d", nb_ops, nb_dequeued);
	return nb_dequeued;
}

static struct rte_compressdev_ops octtx_zip_pmd_ops = {
		.dev_configure		= zip_pmd_config,
		.dev_start		= zip_pmd_start,
		.dev_stop		= zip_pmd_stop,
		.dev_close		= zip_pmd_close,

		.stats_get		= zip_pmd_stats_get,
		.stats_reset		= zip_pmd_stats_reset,

		.dev_infos_get		= zip_pmd_info_get,

		.queue_pair_setup	= zip_pmd_qp_setup,
		.queue_pair_release	= zip_pmd_qp_release,

		.private_xform_create	= zip_pmd_stream_create,
		.private_xform_free	= zip_pmd_stream_free,
		.stream_create		= NULL,
		.stream_free		= NULL
};

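/** Probe the ZIP VF PCI device and create a compressdev instance */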
static int
zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	int ret = 0;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	struct rte_compressdev *compressdev;
	struct rte_compressdev_pmd_init_params init_params = {
		"",
		rte_socket_id(),
	};

	ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
			(unsigned int)pci_dev->id.vendor_id,
			(unsigned int)pci_dev->id.device_id);

	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			    sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_create(compressdev_name,
		&pci_dev->device, sizeof(struct zip_vf), &init_params);
	if (compressdev == NULL) {
		ZIP_PMD_ERR("driver %s: create failed", init_params.name);
		return -ENODEV;
	}

	/*
	 * create only if proc_type is primary.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* create vf dev with given pmd dev id */
		ret = zipvf_create(compressdev);
		if (ret < 0) {
			ZIP_PMD_ERR("Device creation failed");
			rte_compressdev_pmd_destroy(compressdev);
			return ret;
		}
	}

	compressdev->dev_ops = &octtx_zip_pmd_ops;
	/* register rx/tx burst functions for data path */
	compressdev->dequeue_burst = zip_pmd_dequeue_burst;
	compressdev->enqueue_burst = zip_pmd_enqueue_burst;
	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
	return ret;
}

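/** Remove the compressdev instance bound to the given PCI device */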
static int
zip_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_compressdev *compressdev;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];

	if (pci_dev == NULL) {
		ZIP_PMD_ERR("Invalid PCI device");
		return -EINVAL;
	}
	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
	if (compressdev == NULL)
		return -ENODEV;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (zipvf_destroy(compressdev) < 0)
			return -ENODEV;
	}
	return rte_compressdev_pmd_destroy(compressdev);
}

static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			PCI_DEVICE_ID_OCTEONTX_ZIPVF),
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			PCI_DEVICE_ID_OCTEONTX2_ZIPVF),
	},
	{
		.device_id = 0
	},
};

/**
 * Structure that represents a PCI driver
 */
static struct rte_pci_driver octtx_zip_pmd = {
	.id_table    = pci_id_octtx_zipvf_table,
	.drv_flags   = RTE_PCI_DRV_NEED_MAPPING,
	.probe       = zip_pci_probe,
	.remove      = zip_pci_remove,
};

RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
RTE_LOG_REGISTER_DEFAULT(octtx_zip_logtype_driver, INFO);