/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#include <string.h>
#include <stdio.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_memzone.h>

#include "rte_compressdev.h"
#include "rte_compressdev_internal.h"
#include "rte_compressdev_pmd.h"

#define RTE_COMPRESSDEV_DETACHED  (0)
#define RTE_COMPRESSDEV_ATTACHED  (1)

static struct rte_compressdev rte_comp_devices[RTE_COMPRESS_MAX_DEVS];

static struct rte_compressdev_global compressdev_globals = {
	.devs		= rte_comp_devices,
	.data		= { NULL },
	.nb_devs	= 0,
	.max_devs	= RTE_COMPRESS_MAX_DEVS
};

const struct rte_compressdev_capabilities *
rte_compressdev_capability_get(uint8_t dev_id,
		enum rte_comp_algorithm algo)
{
	const struct rte_compressdev_capabilities *capability;
	struct rte_compressdev_info dev_info;
	int i = 0;

	if (dev_id >= compressdev_globals.nb_devs) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
		return NULL;
	}
	rte_compressdev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->algo !=
			RTE_COMP_ALGO_UNSPECIFIED) {
		if (capability->algo == algo)
			return capability;
	}

	return NULL;
}

const char *
rte_compressdev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_COMPDEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_COMPDEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_COMPDEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_COMPDEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_COMPDEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_COMPDEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_COMPDEV_FF_OP_DONE_IN_DEQUEUE:
		return "OP_DONE_IN_DEQ";
	default:
		return NULL;
	}
}

static struct rte_compressdev *
rte_compressdev_get_dev(uint8_t dev_id)
{
	return &compressdev_globals.devs[dev_id];
}

struct rte_compressdev *
rte_compressdev_pmd_get_named_dev(const char *name)
{
	struct rte_compressdev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < compressdev_globals.max_devs; i++) {
		dev = &compressdev_globals.devs[i];

		if ((dev->attached == RTE_COMPRESSDEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static unsigned int
rte_compressdev_is_valid_dev(uint8_t dev_id)
{
	struct rte_compressdev *dev = NULL;

	if (dev_id >= compressdev_globals.nb_devs)
		return 0;

	dev = rte_compressdev_get_dev(dev_id);
	if (dev->attached != RTE_COMPRESSDEV_ATTACHED)
		return 0;
	else
		return 1;
}


int
rte_compressdev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < compressdev_globals.nb_devs; i++)
		if ((strcmp(compressdev_globals.devs[i].data->name, name)
				== 0) &&
				(compressdev_globals.devs[i].attached ==
						RTE_COMPRESSDEV_ATTACHED))
			return i;

	return -1;
}

uint8_t
rte_compressdev_count(void)
{
	return compressdev_globals.nb_devs;
}

uint8_t
rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
		uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_compressdev *devs = compressdev_globals.devs;
	uint8_t max_devs = compressdev_globals.max_devs;

	for (i = 0; i < max_devs && count < nb_devices; i++) {

		if (devs[i].attached == RTE_COMPRESSDEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name));

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}

int
rte_compressdev_socket_id(uint8_t dev_id)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id))
		return -1;

	dev = rte_compressdev_get_dev(dev_id);

	return dev->data->socket_id;
}

static inline int
rte_compressdev_data_alloc(uint8_t dev_id, struct rte_compressdev_data **data,
		int socket_id)
{
	char mz_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name),
			"rte_compressdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_compressdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_compressdev_data));

	return 0;
}

static uint8_t
rte_compressdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_COMPRESS_MAX_DEVS; dev_id++) {
		if (rte_comp_devices[dev_id].attached ==
				RTE_COMPRESSDEV_DETACHED)
			return dev_id;
	}
	return RTE_COMPRESS_MAX_DEVS;
}

struct rte_compressdev *
rte_compressdev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_compressdev *compressdev;
	uint8_t dev_id;

	if (rte_compressdev_pmd_get_named_dev(name) != NULL) {
		COMPRESSDEV_LOG(ERR,
			"comp device with name %s already allocated!", name);
		return NULL;
	}

	dev_id = rte_compressdev_find_free_device_index();
	if (dev_id == RTE_COMPRESS_MAX_DEVS) {
		COMPRESSDEV_LOG(ERR, "Reached maximum number of comp devices");
		return NULL;
	}
	compressdev = rte_compressdev_get_dev(dev_id);

	if (compressdev->data == NULL) {
		struct rte_compressdev_data *compressdev_data =
				compressdev_globals.data[dev_id];

		int retval = rte_compressdev_data_alloc(dev_id,
				&compressdev_data, socket_id);

		if (retval < 0 || compressdev_data == NULL)
			return NULL;

		compressdev->data = compressdev_data;

		strlcpy(compressdev->data->name, name,
			RTE_COMPRESSDEV_NAME_MAX_LEN);

		compressdev->data->dev_id = dev_id;
		compressdev->data->socket_id = socket_id;
		compressdev->data->dev_started = 0;

		compressdev->attached = RTE_COMPRESSDEV_ATTACHED;

		compressdev_globals.nb_devs++;
	}

	return compressdev;
}

int
rte_compressdev_pmd_release_device(struct rte_compressdev *compressdev)
{
	int ret;

	if (compressdev == NULL)
		return -EINVAL;

	/* Close device only if device operations have been set */
	if (compressdev->dev_ops) {
		ret = rte_compressdev_close(compressdev->data->dev_id);
		if (ret < 0)
			return ret;
	}

	compressdev->attached = RTE_COMPRESSDEV_DETACHED;
	compressdev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_compressdev_queue_pair_count(uint8_t dev_id)
{
	struct rte_compressdev *dev;

	dev = &rte_comp_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_compressdev_queue_pairs_config(struct rte_compressdev *dev,
		uint16_t nb_qpairs, int socket_id)
{
	struct rte_compressdev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		COMPRESSDEV_LOG(ERR, "invalid param: dev %p, nb_queues %u",
				dev, nb_qpairs);
		return -EINVAL;
	}

	COMPRESSDEV_LOG(DEBUG, "Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_compressdev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if ((dev_info.max_nb_queue_pairs != 0) &&
			(nb_qpairs > dev_info.max_nb_queue_pairs)) {
		COMPRESSDEV_LOG(ERR, "Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"compressdev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			COMPRESSDEV_LOG(ERR,
				"failed to get memory for qp meta data, nb_queues %u",
				nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			COMPRESSDEV_LOG(ERR,
				"failed to realloc qp meta data, nb_queues %u",
				nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;

	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

static int
rte_compressdev_queue_pairs_release(struct rte_compressdev *dev)
{
	uint16_t num_qps, i;
	int ret;

	if (dev == NULL) {
		COMPRESSDEV_LOG(ERR, "invalid param: dev %p", dev);
		return -EINVAL;
	}

	num_qps = dev->data->nb_queue_pairs;

	if (num_qps == 0)
		return 0;

	COMPRESSDEV_LOG(DEBUG, "Free %d queue pairs on device %u",
			dev->data->nb_queue_pairs, dev->data->dev_id);

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
			-ENOTSUP);

	for (i = 0; i < num_qps; i++) {
		ret = (*dev->dev_ops->queue_pair_release)(dev, i);
		if (ret < 0)
			return ret;
	}

	rte_free(dev->data->queue_pairs);
	dev->data->queue_pairs = NULL;
	dev->data->nb_queue_pairs = 0;

	return 0;
}

int
rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
{
	struct rte_compressdev *dev;
	int diag;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_comp_devices[dev_id];

	if (dev->data->dev_started) {
		COMPRESSDEV_LOG(ERR,
			"device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_compressdev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		COMPRESSDEV_LOG(ERR,
			"dev%d rte_comp_dev_queue_pairs_config = %d",
			dev_id, diag);
		return diag;
	}

	return (*dev->dev_ops->dev_configure)(dev, config);
}

int
rte_compressdev_start(uint8_t dev_id)
{
	struct rte_compressdev *dev;
	int diag;

	COMPRESSDEV_LOG(DEBUG, "Start dev_id=%" PRIu8, dev_id);

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_comp_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		COMPRESSDEV_LOG(ERR,
			"Device with dev_id=%" PRIu8 " already started", dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_compressdev_stop(uint8_t dev_id)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_comp_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		COMPRESSDEV_LOG(ERR,
			"Device with dev_id=%" PRIu8 " already stopped", dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	dev->data->dev_started = 0;
}

int
rte_compressdev_close(uint8_t dev_id)
{
	struct rte_compressdev *dev;
	int retval;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_comp_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		COMPRESSDEV_LOG(ERR, "Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* Free queue pairs memory */
	retval = rte_compressdev_queue_pairs_release(dev);

	if (retval < 0)
		return retval;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_comp_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		COMPRESSDEV_LOG(ERR, "Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		COMPRESSDEV_LOG(ERR,
			"device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (max_inflight_ops == 0) {
		COMPRESSDEV_LOG(ERR,
			"Invalid maximum number of inflight operations");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id,
			max_inflight_ops, socket_id);
}

uint16_t
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct rte_compressdev *dev = &rte_comp_devices[dev_id];

	nb_ops = (*dev->dequeue_burst)
			(dev->data->queue_pairs[qp_id], ops, nb_ops);

	return nb_ops;
}

uint16_t
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct rte_compressdev *dev = &rte_comp_devices[dev_id];

	return (*dev->enqueue_burst)(
			dev->data->queue_pairs[qp_id], ops, nb_ops);
}

int
rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		COMPRESSDEV_LOG(ERR, "Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_comp_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_compressdev_stats_reset(uint8_t dev_id)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_comp_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}


void
rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
{
	struct rte_compressdev *dev;

	if (dev_id >= compressdev_globals.nb_devs) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_comp_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_compressdev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
}

int
rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **priv_xform)
{
	struct rte_compressdev *dev;
	int ret;

	dev = rte_compressdev_get_dev(dev_id);

	if (xform == NULL || priv_xform == NULL || dev == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->private_xform_create, -ENOTSUP);
	ret = (*dev->dev_ops->private_xform_create)(dev, xform, priv_xform);
	if (ret < 0) {
		COMPRESSDEV_LOG(ERR,
			"dev_id %d failed to create private_xform: err=%d",
			dev_id, ret);
		return ret;
	}

	return 0;
}

int
rte_compressdev_private_xform_free(uint8_t dev_id, void *priv_xform)
{
	struct rte_compressdev *dev;
	int ret;

	dev = rte_compressdev_get_dev(dev_id);

	if (dev == NULL || priv_xform == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->private_xform_free, -ENOTSUP);
	ret = dev->dev_ops->private_xform_free(dev, priv_xform);
	if (ret < 0) {
		COMPRESSDEV_LOG(ERR,
			"dev_id %d failed to free private xform: err=%d",
			dev_id, ret);
		return ret;
	}

	return 0;
}

int
rte_compressdev_stream_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **stream)
{
	struct rte_compressdev *dev;
	int ret;

	dev = rte_compressdev_get_dev(dev_id);

	if (xform == NULL || dev == NULL || stream == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stream_create, -ENOTSUP);
	ret = (*dev->dev_ops->stream_create)(dev, xform, stream);
	if (ret < 0) {
		COMPRESSDEV_LOG(ERR,
			"dev_id %d failed to create stream: err=%d",
			dev_id, ret);
		return ret;
	}

	return 0;
}


int
rte_compressdev_stream_free(uint8_t dev_id, void *stream)
{
	struct rte_compressdev *dev;
	int ret;

	dev = rte_compressdev_get_dev(dev_id);

	if (dev == NULL || stream == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stream_free, -ENOTSUP);
	ret = dev->dev_ops->stream_free(dev, stream);
	if (ret < 0) {
		COMPRESSDEV_LOG(ERR,
			"dev_id %d failed to free stream: err=%d",
			dev_id, ret);
		return ret;
	}

	return 0;
}

const char *
rte_compressdev_name_get(uint8_t dev_id)
{
	struct rte_compressdev *dev = rte_compressdev_get_dev(dev_id);

	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

RTE_LOG_REGISTER_DEFAULT(compressdev_logtype, NOTICE);
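
/*
 * Usage sketch (not part of the library): the call sequence an application is
 * expected to follow against the API implemented above, assuming a device
 * identified by "dev_id" has already been probed by a PMD and that op/mbuf
 * pool setup (see rte_comp.h) is done elsewhere. The queue-pair depth of 64
 * and the single queue pair are arbitrary example values.
 *
 *	struct rte_compressdev_config cfg = {
 *		.socket_id = rte_compressdev_socket_id(dev_id),
 *		.nb_queue_pairs = 1,
 *		// remaining fields left at their defaults for this sketch
 *	};
 *
 *	if (rte_compressdev_configure(dev_id, &cfg) < 0)
 *		return -1;
 *	if (rte_compressdev_queue_pair_setup(dev_id, 0, 64, cfg.socket_id) < 0)
 *		return -1;
 *	if (rte_compressdev_start(dev_id) < 0)
 *		return -1;
 *
 *	// create a private xform (or a stream for stateful operation), then
 *	// submit and reap bursts of rte_comp_op on queue pair 0:
 *	// nb = rte_compressdev_enqueue_burst(dev_id, 0, ops, nb);
 *	// nb = rte_compressdev_dequeue_burst(dev_id, 0, ops, nb);
 *
 *	rte_compressdev_stop(dev_id);
 *	rte_compressdev_close(dev_id);
 */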