/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cpuflags.h>
#include <rte_malloc.h>

#include "otx_zip.h"

static const struct rte_compressdev_capabilities
				octtx_zip_pmd_capabilities[] = {
	{	.algo = RTE_COMP_ALGO_DEFLATE,
		/* Deflate */
		.comp_feature_flags =	RTE_COMP_FF_HUFFMAN_FIXED |
					RTE_COMP_FF_HUFFMAN_DYNAMIC,
		/* Non-shareable priv xform, stateless only */
		.window_size = {
				.min = 1,
				.max = 14,
				.increment = 1
				/* window sizes supported: 2^1 to 2^14 */
		},
	},
	RTE_COMP_END_OF_CAPABILITIES_LIST()
};

/*
 * Reset the stream to its default state for the next set of stateless
 * operations.
 */
static inline void
reset_stream(union zip_inst_s *inst)
{
	inst->s.bf = 1;
	inst->s.ef = 0;
}

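/**
 * Process a single stateless (de)compression op: validate the mbufs,
 * build the ZIP instruction for burst slot 'num' and push it to the
 * hardware queue. Completion is polled later in the dequeue path.
 */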
int
zip_process_op(struct rte_comp_op *op,
		struct zipvf_qp *qp,
		struct zip_stream *zstrm, int num)
{
	union zip_inst_s *inst = zstrm->inst[num];
	volatile union zip_zres_s *zresult = NULL;

	if ((op->m_src->nb_segs > 1) || (op->m_dst->nb_segs > 1) ||
			(op->src.offset > rte_pktmbuf_pkt_len(op->m_src)) ||
			(op->dst.offset > rte_pktmbuf_pkt_len(op->m_dst))) {
		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		ZIP_PMD_ERR("Segmented packets and out-of-range offsets are not supported");
		return 0;
	}

	zipvf_prepare_cmd_stateless(op, inst);

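	/* Clear the completion code before submission; the engine writes a
	 * non-zero code here when done, which the dequeue path busy-polls.
	 */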
	zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF + num];
	zresult->s.compcode = 0;

#ifdef ZIP_DBG
	zip_dump_instruction(inst);
#endif

	/* Submit zip command */
	zipvf_push_command(qp, (void *)inst);

	return 0;
}

/** Parse xform parameters and set up a stream */
static int
zip_set_stream_parameters(struct rte_compressdev *dev,
			const struct rte_comp_xform *xform,
			struct zip_stream *z_stream)
{
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
	union zip_inst_s *inst;
	void *res;
	int ret, i;

	/* Allocate resources required by a stream */
	ret = rte_mempool_get_bulk(vf->zip_mp,
			z_stream->bufs, (MAX_BUFS_PER_STREAM * ZIP_BURST_SIZE));
	if (ret < 0)
		return -1;

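	/* One command buffer and one result buffer are carved out of the
	 * bulk allocation for each of the ZIP_BURST_SIZE slots, using the
	 * CMD_BUF/RES_BUF indexing defined in otx_zip.h.
	 */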
	for (i = 0; i < ZIP_BURST_SIZE; i++) {
		/* get one command buffer from pool and set up */
		inst = (union zip_inst_s *)z_stream->bufs[(CMD_BUF * ZIP_BURST_SIZE) + i];
		res = z_stream->bufs[(RES_BUF * ZIP_BURST_SIZE) + i];
		memset(inst->u, 0, sizeof(inst->u));

		/* Set BF (begin flag) only for the first op of a stream */
		inst->s.bf = 1;

		if (xform->type == RTE_COMP_COMPRESS) {
			inst->s.op = ZIP_OP_E_COMP;

			switch (xform->compress.deflate.huffman) {
			case RTE_COMP_HUFFMAN_DEFAULT:
				inst->s.cc = ZIP_CC_DEFAULT;
				break;
			case RTE_COMP_HUFFMAN_FIXED:
				inst->s.cc = ZIP_CC_FIXED_HUFF;
				break;
			case RTE_COMP_HUFFMAN_DYNAMIC:
				inst->s.cc = ZIP_CC_DYN_HUFF;
				break;
			default:
				ret = -1;
				goto err;
			}

			switch (xform->compress.level) {
			case RTE_COMP_LEVEL_MIN:
				inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
				break;
			case RTE_COMP_LEVEL_MAX:
				inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
				break;
			case RTE_COMP_LEVEL_NONE:
				ZIP_PMD_ERR("Compression level not supported");
				ret = -1;
				goto err;
			default:
				/* For any value between min and max, choose
				 * the PMD default.
				 */
				inst->s.ss = ZIP_COMP_E_LEVEL_MED;
				break;
			}
		} else if (xform->type == RTE_COMP_DECOMPRESS) {
			inst->s.op = ZIP_OP_E_DECOMP;
			/* From the HRM:
			 * for DEFLATE decompression, [CC] must be 0x0 and
			 * [SS] must be 0x0.
			 */
			inst->s.cc = 0;
			/* Speed bit should not be set for decompression */
			inst->s.ss = 0;
			/* A decompression context is supported only for
			 * STATEFUL operations. Only STATELESS is currently
			 * supported, so skip setting the ctx pointer.
			 */
		} else {
			ZIP_PMD_ERR("xform type not supported");
			ret = -1;
			goto err;
		}

		inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
		inst->s.res_ptr_ctl.s.length = 0;

		z_stream->inst[i] = inst;
	}

	z_stream->func = zip_process_op;

	return 0;

err:
	rte_mempool_put_bulk(vf->zip_mp,
			     (void *)&(z_stream->bufs[0]),
			     (MAX_BUFS_PER_STREAM * ZIP_BURST_SIZE));

	return ret;
}

/** Configure device */
static int
zip_pmd_config(struct rte_compressdev *dev,
		struct rte_compressdev_config *config)
{
	int nb_streams;
	char res_pool[RTE_MEMZONE_NAMESIZE];
	struct zip_vf *vf;
	struct rte_mempool *zip_buf_mp;

	if (!config || !dev)
		return -EIO;

	vf = (struct zip_vf *)(dev->data->dev_private);

	/* Create a pool with the maximum number of resources required by
	 * streams. A common pool serves both non-shareable priv_xforms
	 * and streams.
	 */
	nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;

	snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
		 dev->data->dev_id);

	/* TBD: should a per-core object cache be used for stream resources? */
	zip_buf_mp = rte_mempool_create(
			res_pool,
			(nb_streams * MAX_BUFS_PER_STREAM * ZIP_BURST_SIZE),
			ZIP_BUF_SIZE,
			0,
			0,
			NULL,
			NULL,
			NULL,
			NULL,
			SOCKET_ID_ANY,
			0);

	if (zip_buf_mp == NULL) {
		ZIP_PMD_ERR(
			"Failed to create buf mempool octtx_zip_res_pool%u",
			dev->data->dev_id);
		return -1;
	}

	vf->zip_mp = zip_buf_mp;

	return 0;
}

/** Start device */
static int
zip_pmd_start(__rte_unused struct rte_compressdev *dev)
{
	return 0;
}

/** Stop device */
static void
zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
{
}

/** Close device */
static int
zip_pmd_close(struct rte_compressdev *dev)
{
	struct zip_vf *vf;

	if (dev == NULL)
		return -1;

	vf = (struct zip_vf *)dev->data->dev_private;
	rte_mempool_free(vf->zip_mp);

	return 0;
}

/** Get device statistics */
static void
zip_pmd_stats_get(struct rte_compressdev *dev,
		struct rte_compressdev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
zip_pmd_stats_reset(struct rte_compressdev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}

/** Get device info */
static void
zip_pmd_info_get(struct rte_compressdev *dev,
		struct rte_compressdev_info *dev_info)
{
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_name = dev->device->driver->name;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = octtx_zip_pmd_capabilities;
		dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
	}
}

/** Release queue pair */
static int
zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
	struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp != NULL) {
		zipvf_q_term(qp);

		rte_ring_free(qp->processed_pkts);

		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}

/** Create a ring to hold processed packets */
static struct rte_ring *
zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			ZIP_PMD_INFO("Reusing existing ring %s for processed packets",
				     qp->name);
			return r;
		}

		ZIP_PMD_ERR("Unable to reuse existing ring %s for processed packets",
			    qp->name);
		return NULL;
	}

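	/* RING_F_EXACT_SZ guarantees the ring can hold exactly ring_size
	 * entries even when ring_size is not a power of two.
	 */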
	return rte_ring_create(qp->name, ring_size, socket_id,
						RING_F_EXACT_SZ);
}

/** Set up a queue pair */
static int
zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct zipvf_qp *qp = NULL;
	struct zip_vf *vf;
	char *name;
	int ret;

	if (!dev)
		return -1;

	vf = (struct zip_vf *)(dev->data->dev_private);

	/* Reuse the queue pair if it has already been set up. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		ZIP_PMD_INFO("Using existing queue pair %d", qp_id);
		return 0;
	}

	name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
	if (name == NULL)
		return -ENOMEM;
	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
		 "zip_pmd_%u_qp_%u",
		 dev->data->dev_id, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket(name, sizeof(*qp),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL) {
		rte_free(name);
		return -ENOMEM;
	}

	qp->name = name;

	/* Create a completion ring holding up to max_inflight_ops */
	qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
						max_inflight_ops, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	qp->id = qp_id;
	qp->vf = vf;

	ret = zipvf_q_init(qp);
	if (ret < 0)
		goto qp_setup_cleanup;

	dev->data->queue_pairs[qp_id] = qp;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	return 0;

qp_setup_cleanup:
	rte_ring_free(qp->processed_pkts);
	rte_free(name);
	rte_free(qp);
	return -1;
}

static int
zip_pmd_stream_create(struct rte_compressdev *dev,
		const struct rte_comp_xform *xform, void **stream)
{
	int ret;
	struct zip_stream *strm = NULL;

	strm = rte_malloc(NULL,
			sizeof(struct zip_stream), 0);
	if (strm == NULL)
		return -ENOMEM;

	ret = zip_set_stream_parameters(dev, xform, strm);
	if (ret < 0) {
		ZIP_PMD_ERR("failed to configure xform parameters");
		rte_free(strm);
		return ret;
	}
	*stream = strm;

	return 0;
}
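
/*
 * Illustrative caller-side sketch (not part of the driver): an
 * application reaches zip_pmd_stream_create() through the generic
 * compressdev API; dev_id below is the application's device id.
 *
 *	struct rte_comp_xform xform = {
 *		.type = RTE_COMP_COMPRESS,
 *		.compress = {
 *			.algo = RTE_COMP_ALGO_DEFLATE,
 *			.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC,
 *			.level = RTE_COMP_LEVEL_PMD_DEFAULT,
 *		},
 *	};
 *	void *priv_xform;
 *	rte_compressdev_private_xform_create(dev_id, &xform, &priv_xform);
 *
 * This PMD exposes streams only via the private_xform ops (see the ops
 * table below), so stream_create/stream_free stay NULL.
 */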

static int
zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
{
	struct zip_vf *vf = (struct zip_vf *)(dev->data->dev_private);
	struct zip_stream *z_stream;

	if (stream == NULL)
		return 0;

	z_stream = (struct zip_stream *)stream;

	/* Free resources back to pool */
	rte_mempool_put_bulk(vf->zip_mp,
				(void *)&(z_stream->bufs[0]),
				(MAX_BUFS_PER_STREAM * ZIP_BURST_SIZE));

	/* Zero out the whole structure */
	memset(stream, 0, sizeof(struct zip_stream));
	rte_free(stream);

	return 0;
}

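/**
 * Enqueue a burst of stateless ops: each op is submitted to the engine
 * (or failed with INVALID_ARGS) and then placed on the completion ring,
 * where the dequeue path picks it up.
 */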
static uint16_t
zip_pmd_enqueue_burst(void *queue_pair,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct zipvf_qp *qp = queue_pair;
	struct zip_stream *zstrm;
	struct rte_comp_op *op;
	int i, ret = 0;
	uint16_t enqd = 0;

	for (i = 0; i < nb_ops; i++) {
		op = ops[i];

		if (op->op_type == RTE_COMP_OP_STATEFUL) {
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		} else {
			/* Process stateless ops */
			zstrm = (struct zip_stream *)op->private_xform;
			if (unlikely(zstrm == NULL))
				op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			else
				ret = zstrm->func(op, qp, zstrm, i);
		}

		/* Whatever the outcome, place the op on the completion
		 * ring along with its status.
		 */
		if (!ret)
			ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);

		if (unlikely(ret < 0)) {
			/* Increment the error count if the op failed to enqueue */
			qp->qp_stats.enqueue_err_count++;
		} else {
			qp->qp_stats.enqueued_count++;
			enqd++;
		}
	}

#ifdef ZIP_DBG
	ZIP_PMD_INFO("ops_enqd[nb_ops:%d]:%d", nb_ops, enqd);
#endif
	return enqd;
}

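/**
 * Dequeue a burst of ops: pops submitted ops off the completion ring,
 * synchronously busy-polls each result buffer until the engine reports
 * a completion code, then translates it into an rte_comp op status.
 */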
static uint16_t
zip_pmd_dequeue_burst(void *queue_pair,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	volatile union zip_zres_s *zresult = NULL;
	struct zipvf_qp *qp = queue_pair;

	unsigned int nb_dequeued = 0;
	struct zip_stream *zstrm;
	struct rte_comp_op *op;
	unsigned int i;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
				(void **)ops, nb_ops, NULL);
	qp->qp_stats.dequeued_count += nb_dequeued;

	/* Dequeue all the submitted ops */
	for (i = 0; i < nb_dequeued; i++) {
		op = ops[i];
		/* Process stateless ops */
		zstrm = (struct zip_stream *)op->private_xform;
		zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF + i];

		/* Synchronous mode: busy-poll until the engine writes a
		 * non-zero completion code into the result buffer.
		 */
		while (!zresult->s.compcode)
			;

		if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
			op->status = RTE_COMP_OP_STATUS_SUCCESS;
		} else {
			/* FATAL error, cannot do anything */
			ZIP_PMD_ERR("operation failed with error code:%d",
				zresult->s.compcode);
			if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
				op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
			else
				op->status = RTE_COMP_OP_STATUS_ERROR;
		}

#ifdef ZIP_DBG
		ZIP_PMD_INFO("written %d", zresult->s.totalbyteswritten);
#endif

		/* Update op stats */
		switch (op->status) {
		case RTE_COMP_OP_STATUS_SUCCESS:
			op->consumed = zresult->s.totalbytesread;
		/* Fall-through */
		case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
			op->produced = zresult->s.totalbyteswritten;
			break;
		default:
			ZIP_PMD_ERR("stats not updated for status:%d",
				    op->status);
			break;
		}

		/* The stream is reset irrespective of the result */
		reset_stream(zstrm->inst[i]);
		zresult->s.compcode = ZIP_COMP_E_NOTDONE;
	}

#ifdef ZIP_DBG
	ZIP_PMD_INFO("ops_deqd[nb_ops:%d]: %d", nb_ops, nb_dequeued);
#endif
	return nb_dequeued;
}
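
/*
 * Illustrative data-path sketch (not part of the driver): applications
 * drive the two functions above through the generic burst API, where
 * dev_id/qp_id are the application's device and queue pair ids:
 *
 *	uint16_t enqd = rte_compressdev_enqueue_burst(dev_id, qp_id,
 *						      ops, nb_ops);
 *	uint16_t deqd = 0;
 *	while (deqd < enqd)
 *		deqd += rte_compressdev_dequeue_burst(dev_id, qp_id,
 *						      &ops[deqd],
 *						      enqd - deqd);
 */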

static struct rte_compressdev_ops octtx_zip_pmd_ops = {
		.dev_configure		= zip_pmd_config,
		.dev_start		= zip_pmd_start,
		.dev_stop		= zip_pmd_stop,
		.dev_close		= zip_pmd_close,

		.stats_get		= zip_pmd_stats_get,
		.stats_reset		= zip_pmd_stats_reset,

		.dev_infos_get		= zip_pmd_info_get,

		.queue_pair_setup	= zip_pmd_qp_setup,
		.queue_pair_release	= zip_pmd_qp_release,

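		/* Stateless only: streams are exposed through the
		 * non-shareable private xform path, so the stream ops
		 * stay NULL.
		 */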
		.private_xform_create	= zip_pmd_stream_create,
		.private_xform_free	= zip_pmd_stream_free,
		.stream_create		= NULL,
		.stream_free		= NULL
};

static int
zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	int ret = 0;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	struct rte_compressdev *compressdev;
	struct rte_compressdev_pmd_init_params init_params = {
		"",
		rte_socket_id(),
	};

	ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
			(unsigned int)pci_dev->id.vendor_id,
			(unsigned int)pci_dev->id.device_id);

	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			    sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_create(compressdev_name,
		&pci_dev->device, sizeof(struct zip_vf), &init_params);
	if (compressdev == NULL) {
		ZIP_PMD_ERR("driver %s: create failed", init_params.name);
		return -ENODEV;
	}

	/* Create the VF device only in the primary process. */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* Create the VF device with the given PMD device id */
		ret = zipvf_create(compressdev);
		if (ret < 0) {
			ZIP_PMD_ERR("Device creation failed");
			rte_compressdev_pmd_destroy(compressdev);
			return ret;
		}
	}

	compressdev->dev_ops = &octtx_zip_pmd_ops;
	/* Register rx/tx burst functions for the data path */
	compressdev->dequeue_burst = zip_pmd_dequeue_burst;
	compressdev->enqueue_burst = zip_pmd_enqueue_burst;
	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
	return ret;
}

static int
zip_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_compressdev *compressdev;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];

	if (pci_dev == NULL) {
		ZIP_PMD_ERR("Invalid PCI device");
		return -EINVAL;
	}
	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
	if (compressdev == NULL)
		return -ENODEV;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (zipvf_destroy(compressdev) < 0)
			return -ENODEV;
	}
	return rte_compressdev_pmd_destroy(compressdev);
}

static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			PCI_DEVICE_ID_OCTEONTX_ZIPVF),
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			PCI_DEVICE_ID_OCTEONTX2_ZIPVF),
	},
	{
		.device_id = 0
	},
};

/**
 * Structure that represents a PCI driver
 */
static struct rte_pci_driver octtx_zip_pmd = {
	.id_table    = pci_id_octtx_zipvf_table,
	.drv_flags   = RTE_PCI_DRV_NEED_MAPPING,
	.probe       = zip_pci_probe,
	.remove      = zip_pci_remove,
};

RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
RTE_LOG_REGISTER_DEFAULT(octtx_zip_logtype_driver, INFO);