xref: /dpdk/lib/compressdev/rte_compressdev.c (revision 1f37cb2bb46b1fd403faa7c3bf8884e6a4dfde66)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017-2018 Intel Corporation
3  */
4 
5 #include <string.h>
6 #include <stdio.h>
7 #include <inttypes.h>
8 
9 #include <rte_string_fns.h>
10 #include <rte_malloc.h>
11 #include <rte_eal.h>
12 #include <rte_memzone.h>
13 
14 #include "rte_compressdev.h"
15 #include "rte_compressdev_internal.h"
16 #include "rte_compressdev_pmd.h"
17 
18 #define RTE_COMPRESSDEV_DETACHED  (0)
19 #define RTE_COMPRESSDEV_ATTACHED  (1)
20 
21 static struct rte_compressdev rte_comp_devices[RTE_COMPRESS_MAX_DEVS];
22 
23 static struct rte_compressdev_global compressdev_globals = {
24 		.devs			= rte_comp_devices,
25 		.data			= { NULL },
26 		.nb_devs		= 0,
27 		.max_devs		= RTE_COMPRESS_MAX_DEVS
28 };
29 
30 const struct rte_compressdev_capabilities *
31 rte_compressdev_capability_get(uint8_t dev_id,
32 			enum rte_comp_algorithm algo)
33 {
34 	const struct rte_compressdev_capabilities *capability;
35 	struct rte_compressdev_info dev_info;
36 	int i = 0;
37 
38 	if (dev_id >= compressdev_globals.nb_devs) {
39 		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
40 		return NULL;
41 	}
42 	rte_compressdev_info_get(dev_id, &dev_info);
43 
44 	while ((capability = &dev_info.capabilities[i++])->algo !=
45 			RTE_COMP_ALGO_UNSPECIFIED){
46 		if (capability->algo == algo)
47 			return capability;
48 	}
49 
50 	return NULL;
51 }
52 
53 const char *
54 rte_compressdev_get_feature_name(uint64_t flag)
55 {
56 	switch (flag) {
57 	case RTE_COMPDEV_FF_HW_ACCELERATED:
58 		return "HW_ACCELERATED";
59 	case RTE_COMPDEV_FF_CPU_SSE:
60 		return "CPU_SSE";
61 	case RTE_COMPDEV_FF_CPU_AVX:
62 		return "CPU_AVX";
63 	case RTE_COMPDEV_FF_CPU_AVX2:
64 		return "CPU_AVX2";
65 	case RTE_COMPDEV_FF_CPU_AVX512:
66 		return "CPU_AVX512";
67 	case RTE_COMPDEV_FF_CPU_NEON:
68 		return "CPU_NEON";
69 	case RTE_COMPDEV_FF_OP_DONE_IN_DEQUEUE:
70 		return "OP_DONE_IN_DEQ";
71 	default:
72 		return NULL;
73 	}
74 }
75 
/*
 * Return the device slot for dev_id.
 * NOTE: performs no bounds or attached-state check — callers must
 * validate dev_id first (e.g. via rte_compressdev_is_valid_dev()).
 */
static struct rte_compressdev *
rte_compressdev_get_dev(uint8_t dev_id)
{
	return &compressdev_globals.devs[dev_id];
}
81 
82 struct rte_compressdev *
83 rte_compressdev_pmd_get_named_dev(const char *name)
84 {
85 	struct rte_compressdev *dev;
86 	unsigned int i;
87 
88 	if (name == NULL)
89 		return NULL;
90 
91 	for (i = 0; i < compressdev_globals.max_devs; i++) {
92 		dev = &compressdev_globals.devs[i];
93 
94 		if ((dev->attached == RTE_COMPRESSDEV_ATTACHED) &&
95 				(strcmp(dev->data->name, name) == 0))
96 			return dev;
97 	}
98 
99 	return NULL;
100 }
101 
102 static unsigned int
103 rte_compressdev_is_valid_dev(uint8_t dev_id)
104 {
105 	struct rte_compressdev *dev = NULL;
106 
107 	if (dev_id >= compressdev_globals.nb_devs)
108 		return 0;
109 
110 	dev = rte_compressdev_get_dev(dev_id);
111 	if (dev->attached != RTE_COMPRESSDEV_ATTACHED)
112 		return 0;
113 	else
114 		return 1;
115 }
116 
117 
118 int
119 rte_compressdev_get_dev_id(const char *name)
120 {
121 	unsigned int i;
122 
123 	if (name == NULL)
124 		return -1;
125 
126 	for (i = 0; i < compressdev_globals.nb_devs; i++)
127 		if ((strcmp(compressdev_globals.devs[i].data->name, name)
128 				== 0) &&
129 				(compressdev_globals.devs[i].attached ==
130 						RTE_COMPRESSDEV_ATTACHED))
131 			return i;
132 
133 	return -1;
134 }
135 
/* Return the number of compression devices known to the library. */
uint8_t
rte_compressdev_count(void)
{
	return compressdev_globals.nb_devs;
}
141 
142 uint8_t
143 rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
144 	uint8_t nb_devices)
145 {
146 	uint8_t i, count = 0;
147 	struct rte_compressdev *devs = compressdev_globals.devs;
148 	uint8_t max_devs = compressdev_globals.max_devs;
149 
150 	for (i = 0; i < max_devs && count < nb_devices;	i++) {
151 
152 		if (devs[i].attached == RTE_COMPRESSDEV_ATTACHED) {
153 			int cmp;
154 
155 			cmp = strncmp(devs[i].device->driver->name,
156 					driver_name,
157 					strlen(driver_name));
158 
159 			if (cmp == 0)
160 				devices[count++] = devs[i].data->dev_id;
161 		}
162 	}
163 
164 	return count;
165 }
166 
167 int
168 rte_compressdev_socket_id(uint8_t dev_id)
169 {
170 	struct rte_compressdev *dev;
171 
172 	if (!rte_compressdev_is_valid_dev(dev_id))
173 		return -1;
174 
175 	dev = rte_compressdev_get_dev(dev_id);
176 
177 	return dev->data->socket_id;
178 }
179 
180 static inline int
181 rte_compressdev_data_alloc(uint8_t dev_id, struct rte_compressdev_data **data,
182 		int socket_id)
183 {
184 	char mz_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
185 	const struct rte_memzone *mz;
186 	int n;
187 
188 	/* generate memzone name */
189 	n = snprintf(mz_name, sizeof(mz_name),
190 			"rte_compressdev_data_%u", dev_id);
191 	if (n >= (int)sizeof(mz_name))
192 		return -EINVAL;
193 
194 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
195 		mz = rte_memzone_reserve(mz_name,
196 				sizeof(struct rte_compressdev_data),
197 				socket_id, 0);
198 	} else
199 		mz = rte_memzone_lookup(mz_name);
200 
201 	if (mz == NULL)
202 		return -ENOMEM;
203 
204 	*data = mz->addr;
205 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
206 		memset(*data, 0, sizeof(struct rte_compressdev_data));
207 
208 	return 0;
209 }
210 
211 static uint8_t
212 rte_compressdev_find_free_device_index(void)
213 {
214 	uint8_t dev_id;
215 
216 	for (dev_id = 0; dev_id < RTE_COMPRESS_MAX_DEVS; dev_id++) {
217 		if (rte_comp_devices[dev_id].attached ==
218 				RTE_COMPRESSDEV_DETACHED)
219 			return dev_id;
220 	}
221 	return RTE_COMPRESS_MAX_DEVS;
222 }
223 
/*
 * Allocate a device slot for a new PMD instance named @name.
 * Attaches the shared per-device data memzone (reserved by the primary
 * process, looked up by secondaries) and marks the slot attached.
 * Returns the device, or NULL on duplicate name, table full, or
 * data allocation failure.
 */
struct rte_compressdev *
rte_compressdev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_compressdev *compressdev;
	uint8_t dev_id;

	/* device names must be unique */
	if (rte_compressdev_pmd_get_named_dev(name) != NULL) {
		COMPRESSDEV_LOG(ERR,
			"comp device with name %s already allocated!", name);
		return NULL;
	}

	dev_id = rte_compressdev_find_free_device_index();
	if (dev_id == RTE_COMPRESS_MAX_DEVS) {
		COMPRESSDEV_LOG(ERR, "Reached maximum number of comp devices");
		return NULL;
	}
	compressdev = rte_compressdev_get_dev(dev_id);

	/* only initialize slots that have no shared data attached yet */
	if (compressdev->data == NULL) {
		struct rte_compressdev_data *compressdev_data =
				compressdev_globals.data[dev_id];

		/* reserve (primary) or look up (secondary) the shared
		 * data memzone for this device id
		 */
		int retval = rte_compressdev_data_alloc(dev_id,
				&compressdev_data, socket_id);

		if (retval < 0 || compressdev_data == NULL)
			return NULL;

		compressdev->data = compressdev_data;

		strlcpy(compressdev->data->name, name,
			RTE_COMPRESSDEV_NAME_MAX_LEN);

		compressdev->data->dev_id = dev_id;
		compressdev->data->socket_id = socket_id;
		compressdev->data->dev_started = 0;

		compressdev->attached = RTE_COMPRESSDEV_ATTACHED;

		compressdev_globals.nb_devs++;
	}

	return compressdev;
}
269 
270 int
271 rte_compressdev_pmd_release_device(struct rte_compressdev *compressdev)
272 {
273 	int ret;
274 
275 	if (compressdev == NULL)
276 		return -EINVAL;
277 
278 	/* Close device only if device operations have been set */
279 	if (compressdev->dev_ops) {
280 		ret = rte_compressdev_close(compressdev->data->dev_id);
281 		if (ret < 0)
282 			return ret;
283 	}
284 
285 	compressdev->attached = RTE_COMPRESSDEV_DETACHED;
286 	compressdev_globals.nb_devs--;
287 	return 0;
288 }
289 
290 uint16_t
291 rte_compressdev_queue_pair_count(uint8_t dev_id)
292 {
293 	struct rte_compressdev *dev;
294 
295 	dev = &rte_comp_devices[dev_id];
296 	return dev->data->nb_queue_pairs;
297 }
298 
/*
 * (Re)size the device's queue-pair pointer array to nb_qpairs entries.
 * First-time configuration allocates a zeroed array; reconfiguration
 * releases surplus pairs, reallocs the array and zeroes any newly added
 * slots. Returns 0 on success or a negative errno.
 */
static int
rte_compressdev_queue_pairs_config(struct rte_compressdev *dev,
		uint16_t nb_qpairs, int socket_id)
{
	struct rte_compressdev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		COMPRESSDEV_LOG(ERR, "invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	COMPRESSDEV_LOG(DEBUG, "Setup %d queues pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_compressdev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	/* max_nb_queue_pairs == 0 means the PMD advertises no limit */
	if ((dev_info.max_nb_queue_pairs != 0) &&
			(nb_qpairs > dev_info.max_nb_queue_pairs)) {
		COMPRESSDEV_LOG(ERR, "Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"compressdev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			COMPRESSDEV_LOG(ERR,
			"failed to get memory for qp meta data, nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		if (*dev->dev_ops->queue_pair_release == NULL)
			return -ENOTSUP;

		/* when shrinking, release the pairs that will disappear */
		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			COMPRESSDEV_LOG(ERR,
			"failed to realloc qp meta data, nb_queues %u",
						nb_qpairs);
			return -(ENOMEM);
		}

		/* when growing, zero the newly added slots */
		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;

	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}
379 
/*
 * Release every configured queue pair via the PMD, then free the
 * queue-pair pointer array. No-op when no pairs are configured.
 * Returns 0 on success or a negative errno (first PMD failure wins).
 */
static int
rte_compressdev_queue_pairs_release(struct rte_compressdev *dev)
{
	uint16_t num_qps, i;
	int ret;

	if (dev == NULL) {
		COMPRESSDEV_LOG(ERR, "invalid param: dev %p", dev);
		return -EINVAL;
	}

	num_qps = dev->data->nb_queue_pairs;

	/* nothing configured — nothing to release */
	if (num_qps == 0)
		return 0;

	COMPRESSDEV_LOG(DEBUG, "Free %d queues pairs on device %u",
			dev->data->nb_queue_pairs, dev->data->dev_id);

	if (*dev->dev_ops->queue_pair_release == NULL)
		return -ENOTSUP;

	for (i = 0; i < num_qps; i++) {
		ret = (*dev->dev_ops->queue_pair_release)(dev, i);
		if (ret < 0)
			return ret;
	}

	rte_free(dev->data->queue_pairs);
	dev->data->queue_pairs = NULL;
	dev->data->nb_queue_pairs = 0;

	return 0;
}
414 
415 int
416 rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
417 {
418 	struct rte_compressdev *dev;
419 	int diag;
420 
421 	if (!rte_compressdev_is_valid_dev(dev_id)) {
422 		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
423 		return -EINVAL;
424 	}
425 
426 	dev = &rte_comp_devices[dev_id];
427 
428 	if (dev->data->dev_started) {
429 		COMPRESSDEV_LOG(ERR,
430 		    "device %d must be stopped to allow configuration", dev_id);
431 		return -EBUSY;
432 	}
433 
434 	if (*dev->dev_ops->dev_configure == NULL)
435 		return -ENOTSUP;
436 
437 	/* Setup new number of queue pairs and reconfigure device. */
438 	diag = rte_compressdev_queue_pairs_config(dev, config->nb_queue_pairs,
439 			config->socket_id);
440 	if (diag != 0) {
441 		COMPRESSDEV_LOG(ERR,
442 			"dev%d rte_comp_dev_queue_pairs_config = %d",
443 				dev_id, diag);
444 		return diag;
445 	}
446 
447 	return (*dev->dev_ops->dev_configure)(dev, config);
448 }
449 
450 int
451 rte_compressdev_start(uint8_t dev_id)
452 {
453 	struct rte_compressdev *dev;
454 	int diag;
455 
456 	COMPRESSDEV_LOG(DEBUG, "Start dev_id=%" PRIu8, dev_id);
457 
458 	if (!rte_compressdev_is_valid_dev(dev_id)) {
459 		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
460 		return -EINVAL;
461 	}
462 
463 	dev = &rte_comp_devices[dev_id];
464 
465 	if (*dev->dev_ops->dev_start == NULL)
466 		return -ENOTSUP;
467 
468 	if (dev->data->dev_started != 0) {
469 		COMPRESSDEV_LOG(ERR,
470 		    "Device with dev_id=%" PRIu8 " already started", dev_id);
471 		return 0;
472 	}
473 
474 	diag = (*dev->dev_ops->dev_start)(dev);
475 	if (diag == 0)
476 		dev->data->dev_started = 1;
477 	else
478 		return diag;
479 
480 	return 0;
481 }
482 
483 void
484 rte_compressdev_stop(uint8_t dev_id)
485 {
486 	struct rte_compressdev *dev;
487 
488 	if (!rte_compressdev_is_valid_dev(dev_id)) {
489 		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
490 		return;
491 	}
492 
493 	dev = &rte_comp_devices[dev_id];
494 
495 	if (*dev->dev_ops->dev_stop == NULL)
496 		return;
497 
498 	if (dev->data->dev_started == 0) {
499 		COMPRESSDEV_LOG(ERR,
500 		    "Device with dev_id=%" PRIu8 " already stopped", dev_id);
501 		return;
502 	}
503 
504 	(*dev->dev_ops->dev_stop)(dev);
505 	dev->data->dev_started = 0;
506 }
507 
508 int
509 rte_compressdev_close(uint8_t dev_id)
510 {
511 	struct rte_compressdev *dev;
512 	int retval;
513 
514 	if (!rte_compressdev_is_valid_dev(dev_id)) {
515 		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
516 		return -1;
517 	}
518 
519 	dev = &rte_comp_devices[dev_id];
520 
521 	/* Device must be stopped before it can be closed */
522 	if (dev->data->dev_started == 1) {
523 		COMPRESSDEV_LOG(ERR, "Device %u must be stopped before closing",
524 				dev_id);
525 		return -EBUSY;
526 	}
527 
528 	/* Free queue pairs memory */
529 	retval = rte_compressdev_queue_pairs_release(dev);
530 
531 	if (retval < 0)
532 		return retval;
533 
534 	if (*dev->dev_ops->dev_close == NULL)
535 		return -ENOTSUP;
536 	retval = (*dev->dev_ops->dev_close)(dev);
537 
538 	if (retval < 0)
539 		return retval;
540 
541 	return 0;
542 }
543 
544 int
545 rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
546 		uint32_t max_inflight_ops, int socket_id)
547 {
548 	struct rte_compressdev *dev;
549 
550 	if (!rte_compressdev_is_valid_dev(dev_id)) {
551 		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
552 		return -EINVAL;
553 	}
554 
555 	dev = &rte_comp_devices[dev_id];
556 	if (queue_pair_id >= dev->data->nb_queue_pairs) {
557 		COMPRESSDEV_LOG(ERR, "Invalid queue_pair_id=%d", queue_pair_id);
558 		return -EINVAL;
559 	}
560 
561 	if (dev->data->dev_started) {
562 		COMPRESSDEV_LOG(ERR,
563 		    "device %d must be stopped to allow configuration", dev_id);
564 		return -EBUSY;
565 	}
566 
567 	if (max_inflight_ops == 0) {
568 		COMPRESSDEV_LOG(ERR,
569 			"Invalid maximum number of inflight operations");
570 		return -EINVAL;
571 	}
572 
573 	if (*dev->dev_ops->queue_pair_setup == NULL)
574 		return -ENOTSUP;
575 
576 	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id,
577 			max_inflight_ops, socket_id);
578 }
579 
580 uint16_t
581 rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
582 		struct rte_comp_op **ops, uint16_t nb_ops)
583 {
584 	struct rte_compressdev *dev = &rte_comp_devices[dev_id];
585 
586 	nb_ops = (*dev->dequeue_burst)
587 			(dev->data->queue_pairs[qp_id], ops, nb_ops);
588 
589 	return nb_ops;
590 }
591 
592 uint16_t
593 rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
594 		struct rte_comp_op **ops, uint16_t nb_ops)
595 {
596 	struct rte_compressdev *dev = &rte_comp_devices[dev_id];
597 
598 	return (*dev->enqueue_burst)(
599 			dev->data->queue_pairs[qp_id], ops, nb_ops);
600 }
601 
602 int
603 rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats)
604 {
605 	struct rte_compressdev *dev;
606 
607 	if (!rte_compressdev_is_valid_dev(dev_id)) {
608 		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
609 		return -ENODEV;
610 	}
611 
612 	if (stats == NULL) {
613 		COMPRESSDEV_LOG(ERR, "Invalid stats ptr");
614 		return -EINVAL;
615 	}
616 
617 	dev = &rte_comp_devices[dev_id];
618 	memset(stats, 0, sizeof(*stats));
619 
620 	if (*dev->dev_ops->stats_get == NULL)
621 		return -ENOTSUP;
622 	(*dev->dev_ops->stats_get)(dev, stats);
623 	return 0;
624 }
625 
626 void
627 rte_compressdev_stats_reset(uint8_t dev_id)
628 {
629 	struct rte_compressdev *dev;
630 
631 	if (!rte_compressdev_is_valid_dev(dev_id)) {
632 		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
633 		return;
634 	}
635 
636 	dev = &rte_comp_devices[dev_id];
637 
638 	if (*dev->dev_ops->stats_reset == NULL)
639 		return;
640 	(*dev->dev_ops->stats_reset)(dev);
641 }
642 
643 
644 void
645 rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
646 {
647 	struct rte_compressdev *dev;
648 
649 	if (dev_id >= compressdev_globals.nb_devs) {
650 		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
651 		return;
652 	}
653 
654 	dev = &rte_comp_devices[dev_id];
655 
656 	memset(dev_info, 0, sizeof(struct rte_compressdev_info));
657 
658 	if (*dev->dev_ops->dev_infos_get == NULL)
659 		return;
660 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
661 
662 	dev_info->driver_name = dev->device->driver->name;
663 }
664 
665 int
666 rte_compressdev_private_xform_create(uint8_t dev_id,
667 		const struct rte_comp_xform *xform,
668 		void **priv_xform)
669 {
670 	struct rte_compressdev *dev;
671 	int ret;
672 
673 	dev = rte_compressdev_get_dev(dev_id);
674 
675 	if (xform == NULL || priv_xform == NULL || dev == NULL)
676 		return -EINVAL;
677 
678 	if (*dev->dev_ops->private_xform_create == NULL)
679 		return -ENOTSUP;
680 	ret = (*dev->dev_ops->private_xform_create)(dev, xform, priv_xform);
681 	if (ret < 0) {
682 		COMPRESSDEV_LOG(ERR,
683 			"dev_id %d failed to create private_xform: err=%d",
684 			dev_id, ret);
685 		return ret;
686 	};
687 
688 	return 0;
689 }
690 
691 int
692 rte_compressdev_private_xform_free(uint8_t dev_id, void *priv_xform)
693 {
694 	struct rte_compressdev *dev;
695 	int ret;
696 
697 	dev = rte_compressdev_get_dev(dev_id);
698 
699 	if (dev == NULL || priv_xform == NULL)
700 		return -EINVAL;
701 
702 	if (*dev->dev_ops->private_xform_free == NULL)
703 		return -ENOTSUP;
704 	ret = dev->dev_ops->private_xform_free(dev, priv_xform);
705 	if (ret < 0) {
706 		COMPRESSDEV_LOG(ERR,
707 			"dev_id %d failed to free private xform: err=%d",
708 			dev_id, ret);
709 		return ret;
710 	};
711 
712 	return 0;
713 }
714 
715 int
716 rte_compressdev_stream_create(uint8_t dev_id,
717 		const struct rte_comp_xform *xform,
718 		void **stream)
719 {
720 	struct rte_compressdev *dev;
721 	int ret;
722 
723 	dev = rte_compressdev_get_dev(dev_id);
724 
725 	if (xform == NULL || dev == NULL || stream == NULL)
726 		return -EINVAL;
727 
728 	if (*dev->dev_ops->stream_create == NULL)
729 		return -ENOTSUP;
730 	ret = (*dev->dev_ops->stream_create)(dev, xform, stream);
731 	if (ret < 0) {
732 		COMPRESSDEV_LOG(ERR,
733 			"dev_id %d failed to create stream: err=%d",
734 			dev_id, ret);
735 		return ret;
736 	};
737 
738 	return 0;
739 }
740 
741 
742 int
743 rte_compressdev_stream_free(uint8_t dev_id, void *stream)
744 {
745 	struct rte_compressdev *dev;
746 	int ret;
747 
748 	dev = rte_compressdev_get_dev(dev_id);
749 
750 	if (dev == NULL || stream == NULL)
751 		return -EINVAL;
752 
753 	if (*dev->dev_ops->stream_free == NULL)
754 		return -ENOTSUP;
755 	ret = dev->dev_ops->stream_free(dev, stream);
756 	if (ret < 0) {
757 		COMPRESSDEV_LOG(ERR,
758 			"dev_id %d failed to free stream: err=%d",
759 			dev_id, ret);
760 		return ret;
761 	};
762 
763 	return 0;
764 }
765 
766 const char *
767 rte_compressdev_name_get(uint8_t dev_id)
768 {
769 	struct rte_compressdev *dev = rte_compressdev_get_dev(dev_id);
770 
771 	if (dev == NULL)
772 		return NULL;
773 
774 	return dev->data->name;
775 }
776 
777 RTE_LOG_REGISTER_DEFAULT(compressdev_logtype, NOTICE);
778