xref: /dpdk/lib/compressdev/rte_compressdev.c (revision 1acb7f547455f636a6968cb3b4ca3870279dfece)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#include <string.h>
#include <stdio.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <dev_driver.h>
#include <rte_eal.h>
#include <rte_memzone.h>

#include "rte_compressdev.h"
#include "rte_compressdev_internal.h"
#include "rte_compressdev_pmd.h"

#define RTE_COMPRESSDEV_DETACHED  (0)
#define RTE_COMPRESSDEV_ATTACHED  (1)

static struct rte_compressdev rte_comp_devices[RTE_COMPRESS_MAX_DEVS];

static struct rte_compressdev_global compressdev_globals = {
		.devs			= rte_comp_devices,
		.data			= { NULL },
		.nb_devs		= 0,
		.max_devs		= RTE_COMPRESS_MAX_DEVS
};

const struct rte_compressdev_capabilities *
rte_compressdev_capability_get(uint8_t dev_id,
			enum rte_comp_algorithm algo)
{
	const struct rte_compressdev_capabilities *capability;
	struct rte_compressdev_info dev_info;
	int i = 0;

	if (dev_id >= compressdev_globals.nb_devs) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
		return NULL;
	}
	rte_compressdev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->algo !=
			RTE_COMP_ALGO_UNSPECIFIED) {
		if (capability->algo == algo)
			return capability;
	}

	return NULL;
}

const char *
rte_compressdev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_COMPDEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_COMPDEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_COMPDEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_COMPDEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_COMPDEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_COMPDEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_COMPDEV_FF_OP_DONE_IN_DEQUEUE:
		return "OP_DONE_IN_DEQ";
	default:
		return NULL;
	}
}

static struct rte_compressdev *
rte_compressdev_get_dev(uint8_t dev_id)
{
	return &compressdev_globals.devs[dev_id];
}

struct rte_compressdev *
rte_compressdev_pmd_get_named_dev(const char *name)
{
	struct rte_compressdev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < compressdev_globals.max_devs; i++) {
		dev = &compressdev_globals.devs[i];

		if ((dev->attached == RTE_COMPRESSDEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static unsigned int
rte_compressdev_is_valid_dev(uint8_t dev_id)
{
	struct rte_compressdev *dev = NULL;

	if (dev_id >= compressdev_globals.nb_devs)
		return 0;

	dev = rte_compressdev_get_dev(dev_id);
	if (dev->attached != RTE_COMPRESSDEV_ATTACHED)
		return 0;
	else
		return 1;
}


int
rte_compressdev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < compressdev_globals.nb_devs; i++)
		if ((strcmp(compressdev_globals.devs[i].data->name, name)
				== 0) &&
				(compressdev_globals.devs[i].attached ==
						RTE_COMPRESSDEV_ATTACHED))
			return i;

	return -1;
}

uint8_t
rte_compressdev_count(void)
{
	return compressdev_globals.nb_devs;
}

uint8_t
rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_compressdev *devs = compressdev_globals.devs;
	uint8_t max_devs = compressdev_globals.max_devs;

	for (i = 0; i < max_devs && count < nb_devices; i++) {

		if (devs[i].attached == RTE_COMPRESSDEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name));

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}

int
rte_compressdev_socket_id(uint8_t dev_id)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id))
		return -1;

	dev = rte_compressdev_get_dev(dev_id);

	return dev->data->socket_id;
}

static inline int
rte_compressdev_data_alloc(uint8_t dev_id, struct rte_compressdev_data **data,
		int socket_id)
{
	char mz_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name),
			"rte_compressdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_compressdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_compressdev_data));

	return 0;
}

static uint8_t
rte_compressdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_COMPRESS_MAX_DEVS; dev_id++) {
		if (rte_comp_devices[dev_id].attached ==
				RTE_COMPRESSDEV_DETACHED)
			return dev_id;
	}
	return RTE_COMPRESS_MAX_DEVS;
}

struct rte_compressdev *
rte_compressdev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_compressdev *compressdev;
	uint8_t dev_id;

	if (rte_compressdev_pmd_get_named_dev(name) != NULL) {
		COMPRESSDEV_LOG(ERR,
			"comp device with name %s already allocated!", name);
		return NULL;
	}

	dev_id = rte_compressdev_find_free_device_index();
	if (dev_id == RTE_COMPRESS_MAX_DEVS) {
		COMPRESSDEV_LOG(ERR, "Reached maximum number of comp devices");
		return NULL;
	}
	compressdev = rte_compressdev_get_dev(dev_id);

	if (compressdev->data == NULL) {
		struct rte_compressdev_data *compressdev_data =
				compressdev_globals.data[dev_id];

		int retval = rte_compressdev_data_alloc(dev_id,
				&compressdev_data, socket_id);

		if (retval < 0 || compressdev_data == NULL)
			return NULL;

		compressdev->data = compressdev_data;

		strlcpy(compressdev->data->name, name,
			RTE_COMPRESSDEV_NAME_MAX_LEN);

		compressdev->data->dev_id = dev_id;
		compressdev->data->socket_id = socket_id;
		compressdev->data->dev_started = 0;

		compressdev->attached = RTE_COMPRESSDEV_ATTACHED;

		compressdev_globals.nb_devs++;
	}

	return compressdev;
}

int
rte_compressdev_pmd_release_device(struct rte_compressdev *compressdev)
{
	int ret;

	if (compressdev == NULL)
		return -EINVAL;

	/* Close device only if device operations have been set */
	if (compressdev->dev_ops) {
		ret = rte_compressdev_close(compressdev->data->dev_id);
		if (ret < 0)
			return ret;
	}

	compressdev->attached = RTE_COMPRESSDEV_DETACHED;
	compressdev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_compressdev_queue_pair_count(uint8_t dev_id)
{
	struct rte_compressdev *dev;

	dev = &rte_comp_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_compressdev_queue_pairs_config(struct rte_compressdev *dev,
		uint16_t nb_qpairs, int socket_id)
{
	struct rte_compressdev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		COMPRESSDEV_LOG(ERR, "invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	COMPRESSDEV_LOG(DEBUG, "Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_compressdev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if ((dev_info.max_nb_queue_pairs != 0) &&
			(nb_qpairs > dev_info.max_nb_queue_pairs)) {
		COMPRESSDEV_LOG(ERR, "Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"compressdev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			COMPRESSDEV_LOG(ERR,
			"failed to get memory for qp meta data, nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		if (*dev->dev_ops->queue_pair_release == NULL)
			return -ENOTSUP;

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			COMPRESSDEV_LOG(ERR,
			"failed to realloc qp meta data, nb_queues %u",
						nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;

	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

static int
rte_compressdev_queue_pairs_release(struct rte_compressdev *dev)
{
	uint16_t num_qps, i;
	int ret;

	if (dev == NULL) {
		COMPRESSDEV_LOG(ERR, "invalid param: dev %p", dev);
		return -EINVAL;
	}

	num_qps = dev->data->nb_queue_pairs;

	if (num_qps == 0)
		return 0;

	COMPRESSDEV_LOG(DEBUG, "Free %d queue pairs on device %u",
			dev->data->nb_queue_pairs, dev->data->dev_id);

	if (*dev->dev_ops->queue_pair_release == NULL)
		return -ENOTSUP;

	for (i = 0; i < num_qps; i++) {
		ret = (*dev->dev_ops->queue_pair_release)(dev, i);
		if (ret < 0)
			return ret;
	}

	rte_free(dev->data->queue_pairs);
	dev->data->queue_pairs = NULL;
	dev->data->nb_queue_pairs = 0;

	return 0;
}

int
rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
{
	struct rte_compressdev *dev;
	int diag;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_comp_devices[dev_id];

	if (dev->data->dev_started) {
		COMPRESSDEV_LOG(ERR,
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_compressdev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		COMPRESSDEV_LOG(ERR,
			"dev%d rte_comp_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	return (*dev->dev_ops->dev_configure)(dev, config);
}

int
rte_compressdev_start(uint8_t dev_id)
{
	struct rte_compressdev *dev;
	int diag;

	COMPRESSDEV_LOG(DEBUG, "Start dev_id=%" PRIu8, dev_id);

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_comp_devices[dev_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started != 0) {
		COMPRESSDEV_LOG(ERR,
		    "Device with dev_id=%" PRIu8 " already started", dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_compressdev_stop(uint8_t dev_id)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_comp_devices[dev_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return;

	if (dev->data->dev_started == 0) {
		COMPRESSDEV_LOG(ERR,
		    "Device with dev_id=%" PRIu8 " already stopped", dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	dev->data->dev_started = 0;
}

int
rte_compressdev_close(uint8_t dev_id)
{
	struct rte_compressdev *dev;
	int retval;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_comp_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		COMPRESSDEV_LOG(ERR, "Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* Free queue pairs memory */
	retval = rte_compressdev_queue_pairs_release(dev);

	if (retval < 0)
		return retval;

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	retval = (*dev->dev_ops->dev_close)(dev);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_comp_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		COMPRESSDEV_LOG(ERR, "Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		COMPRESSDEV_LOG(ERR,
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (max_inflight_ops == 0) {
		COMPRESSDEV_LOG(ERR,
			"Invalid maximum number of inflight operations");
		return -EINVAL;
	}

	if (*dev->dev_ops->queue_pair_setup == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id,
			max_inflight_ops, socket_id);
}

uint16_t
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct rte_compressdev *dev = &rte_comp_devices[dev_id];

	nb_ops = (*dev->dequeue_burst)
			(dev->data->queue_pairs[qp_id], ops, nb_ops);

	return nb_ops;
}

uint16_t
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct rte_compressdev *dev = &rte_comp_devices[dev_id];

	return (*dev->enqueue_burst)(
			dev->data->queue_pairs[qp_id], ops, nb_ops);
}

int
rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		COMPRESSDEV_LOG(ERR, "Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_comp_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	if (*dev->dev_ops->stats_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_compressdev_stats_reset(uint8_t dev_id)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_comp_devices[dev_id];

	if (*dev->dev_ops->stats_reset == NULL)
		return;
	(*dev->dev_ops->stats_reset)(dev);
}


void
rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
{
	struct rte_compressdev *dev;

	if (dev_id >= compressdev_globals.nb_devs) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_comp_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_compressdev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return;
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
}

int
rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **priv_xform)
{
	struct rte_compressdev *dev;
	int ret;

	dev = rte_compressdev_get_dev(dev_id);

	if (xform == NULL || priv_xform == NULL || dev == NULL)
		return -EINVAL;

	if (*dev->dev_ops->private_xform_create == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->private_xform_create)(dev, xform, priv_xform);
	if (ret < 0) {
		COMPRESSDEV_LOG(ERR,
			"dev_id %d failed to create private_xform: err=%d",
			dev_id, ret);
		return ret;
	}

	return 0;
}

int
rte_compressdev_private_xform_free(uint8_t dev_id, void *priv_xform)
{
	struct rte_compressdev *dev;
	int ret;

	dev = rte_compressdev_get_dev(dev_id);

	if (dev == NULL || priv_xform == NULL)
		return -EINVAL;

	if (*dev->dev_ops->private_xform_free == NULL)
		return -ENOTSUP;
	ret = dev->dev_ops->private_xform_free(dev, priv_xform);
	if (ret < 0) {
		COMPRESSDEV_LOG(ERR,
			"dev_id %d failed to free private xform: err=%d",
			dev_id, ret);
		return ret;
	}

	return 0;
}

int
rte_compressdev_stream_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **stream)
{
	struct rte_compressdev *dev;
	int ret;

	dev = rte_compressdev_get_dev(dev_id);

	if (xform == NULL || dev == NULL || stream == NULL)
		return -EINVAL;

	if (*dev->dev_ops->stream_create == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->stream_create)(dev, xform, stream);
	if (ret < 0) {
		COMPRESSDEV_LOG(ERR,
			"dev_id %d failed to create stream: err=%d",
			dev_id, ret);
		return ret;
	}

	return 0;
}


int
rte_compressdev_stream_free(uint8_t dev_id, void *stream)
{
	struct rte_compressdev *dev;
	int ret;

	dev = rte_compressdev_get_dev(dev_id);

	if (dev == NULL || stream == NULL)
		return -EINVAL;

	if (*dev->dev_ops->stream_free == NULL)
		return -ENOTSUP;
	ret = dev->dev_ops->stream_free(dev, stream);
	if (ret < 0) {
		COMPRESSDEV_LOG(ERR,
			"dev_id %d failed to free stream: err=%d",
			dev_id, ret);
		return ret;
	}

	return 0;
}

const char *
rte_compressdev_name_get(uint8_t dev_id)
{
	struct rte_compressdev *dev = rte_compressdev_get_dev(dev_id);

	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

RTE_LOG_REGISTER_DEFAULT(compressdev_logtype, NOTICE);
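
/*
 * Editor's note: illustrative usage sketch, not part of the library source.
 * A minimal, hedged example of how an application might drive the API
 * implemented above, assuming device 0 exists, a single queue pair is
 * sufficient, and `ops`/`nb_ops` already hold prepared rte_comp_op objects.
 * Only fields referenced in this file (socket_id, nb_queue_pairs) are set
 * in the config; remaining rte_compressdev_config fields are left at zero
 * and may need tuning for a real driver.
 *
 *	struct rte_compressdev_config cfg = {
 *		.socket_id = rte_compressdev_socket_id(0),
 *		.nb_queue_pairs = 1,
 *	};
 *
 *	// Device must be stopped while configuring and setting up queue pairs.
 *	if (rte_compressdev_configure(0, &cfg) < 0 ||
 *	    rte_compressdev_queue_pair_setup(0, 0, 64, cfg.socket_id) < 0 ||
 *	    rte_compressdev_start(0) < 0)
 *		return -1;	// setup failed
 *
 *	// Data path: enqueue a burst, then poll until all ops come back.
 *	uint16_t enq = rte_compressdev_enqueue_burst(0, 0, ops, nb_ops);
 *	uint16_t deq = 0;
 *	while (deq < enq)
 *		deq += rte_compressdev_dequeue_burst(0, 0,
 *				ops + deq, enq - deq);
 *
 *	// Teardown: stop before close, mirroring the checks in this file.
 *	rte_compressdev_stop(0);
 *	rte_compressdev_close(0);
 */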