/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#include <string.h>
#include <stdarg.h>
#include <stdio.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_memzone.h>

#include "rte_compressdev.h"
#include "rte_compressdev_internal.h"
#include "rte_compressdev_pmd.h"

#define RTE_COMPRESSDEV_DETACHED  (0)
#define RTE_COMPRESSDEV_ATTACHED  (1)

static struct rte_compressdev rte_comp_devices[RTE_COMPRESS_MAX_DEVS];

static struct rte_compressdev_global compressdev_globals = {
	.devs		= rte_comp_devices,
	.data		= { NULL },
	.nb_devs	= 0,
	.max_devs	= RTE_COMPRESS_MAX_DEVS
};

const struct rte_compressdev_capabilities *
rte_compressdev_capability_get(uint8_t dev_id,
		enum rte_comp_algorithm algo)
{
	const struct rte_compressdev_capabilities *capability;
	struct rte_compressdev_info dev_info;
	int i = 0;

	if (dev_id >= compressdev_globals.nb_devs) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
		return NULL;
	}
	rte_compressdev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->algo !=
			RTE_COMP_ALGO_UNSPECIFIED) {
		if (capability->algo == algo)
			return capability;
	}

	return NULL;
}

const char *
rte_compressdev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_COMPDEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_COMPDEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_COMPDEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_COMPDEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_COMPDEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_COMPDEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_COMPDEV_FF_OP_DONE_IN_DEQUEUE:
		return "OP_DONE_IN_DEQ";
	default:
		return NULL;
	}
}

static struct rte_compressdev *
rte_compressdev_get_dev(uint8_t dev_id)
{
	return &compressdev_globals.devs[dev_id];
}

struct rte_compressdev *
rte_compressdev_pmd_get_named_dev(const char *name)
{
	struct rte_compressdev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < compressdev_globals.max_devs; i++) {
		dev = &compressdev_globals.devs[i];

		if ((dev->attached == RTE_COMPRESSDEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static unsigned int
rte_compressdev_is_valid_dev(uint8_t dev_id)
{
	struct rte_compressdev *dev = NULL;

	if (dev_id >= compressdev_globals.nb_devs)
		return 0;

	dev = rte_compressdev_get_dev(dev_id);
	if (dev->attached != RTE_COMPRESSDEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_compressdev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < compressdev_globals.nb_devs; i++)
		if ((strcmp(compressdev_globals.devs[i].data->name, name)
				== 0) &&
				(compressdev_globals.devs[i].attached ==
						RTE_COMPRESSDEV_ATTACHED))
			return i;

	return -1;
}

uint8_t
rte_compressdev_count(void)
{
	return compressdev_globals.nb_devs;
}
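
/*
 * Illustrative usage sketch (not part of the library): an application can
 * size an id array with rte_compressdev_count() or RTE_COMPRESS_MAX_DEVS
 * and then collect the ids of all devices bound to one driver, e.g.
 *
 *	uint8_t ids[RTE_COMPRESS_MAX_DEVS];
 *	uint8_t n = rte_compressdev_devices_get("compress_qat", ids,
 *			RTE_DIM(ids));
 *
 * The driver name "compress_qat" above is only an example; any comp PMD
 * name can be passed.
 */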

uint8_t
rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
		uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_compressdev *devs = compressdev_globals.devs;
	uint8_t max_devs = compressdev_globals.max_devs;

	for (i = 0; i < max_devs && count < nb_devices; i++) {

		if (devs[i].attached == RTE_COMPRESSDEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name));

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}

int
rte_compressdev_socket_id(uint8_t dev_id)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id))
		return -1;

	dev = rte_compressdev_get_dev(dev_id);

	return dev->data->socket_id;
}
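
/*
 * Device data is kept in a named memzone ("rte_compressdev_data_<dev_id>")
 * rather than in process-local memory: the primary process reserves the
 * zone below, while secondary processes only look it up, so both see the
 * same rte_compressdev_data for a given device.
 */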

static inline int
rte_compressdev_data_alloc(uint8_t dev_id, struct rte_compressdev_data **data,
		int socket_id)
{
	char mz_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name),
			"rte_compressdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_compressdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_compressdev_data));

	return 0;
}

static uint8_t
rte_compressdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_COMPRESS_MAX_DEVS; dev_id++) {
		if (rte_comp_devices[dev_id].attached ==
				RTE_COMPRESSDEV_DETACHED)
			return dev_id;
	}
	return RTE_COMPRESS_MAX_DEVS;
}

struct rte_compressdev *
rte_compressdev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_compressdev *compressdev;
	uint8_t dev_id;

	if (rte_compressdev_pmd_get_named_dev(name) != NULL) {
		COMPRESSDEV_LOG(ERR,
			"comp device with name %s already allocated!", name);
		return NULL;
	}

	dev_id = rte_compressdev_find_free_device_index();
	if (dev_id == RTE_COMPRESS_MAX_DEVS) {
		COMPRESSDEV_LOG(ERR, "Reached maximum number of comp devices");
		return NULL;
	}
	compressdev = rte_compressdev_get_dev(dev_id);

	if (compressdev->data == NULL) {
		struct rte_compressdev_data *compressdev_data =
				compressdev_globals.data[dev_id];

		int retval = rte_compressdev_data_alloc(dev_id,
				&compressdev_data, socket_id);

		if (retval < 0 || compressdev_data == NULL)
			return NULL;

		compressdev->data = compressdev_data;

		strlcpy(compressdev->data->name, name,
				RTE_COMPRESSDEV_NAME_MAX_LEN);

		compressdev->data->dev_id = dev_id;
		compressdev->data->socket_id = socket_id;
		compressdev->data->dev_started = 0;

		compressdev->attached = RTE_COMPRESSDEV_ATTACHED;

		compressdev_globals.nb_devs++;
	}

	return compressdev;
}

int
rte_compressdev_pmd_release_device(struct rte_compressdev *compressdev)
{
	int ret;

	if (compressdev == NULL)
		return -EINVAL;

	/* Close device only if device operations have been set */
	if (compressdev->dev_ops) {
		ret = rte_compressdev_close(compressdev->data->dev_id);
		if (ret < 0)
			return ret;
	}

	compressdev->attached = RTE_COMPRESSDEV_DETACHED;
	compressdev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_compressdev_queue_pair_count(uint8_t dev_id)
{
	struct rte_compressdev *dev;

	dev = &rte_comp_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_compressdev_queue_pairs_config(struct rte_compressdev *dev,
		uint16_t nb_qpairs, int socket_id)
{
	struct rte_compressdev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		COMPRESSDEV_LOG(ERR, "invalid param: dev %p, nb_queues %u",
				dev, nb_qpairs);
		return -EINVAL;
	}

	COMPRESSDEV_LOG(DEBUG, "Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_compressdev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if ((dev_info.max_nb_queue_pairs != 0) &&
			(nb_qpairs > dev_info.max_nb_queue_pairs)) {
		COMPRESSDEV_LOG(ERR, "Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"compressdev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			COMPRESSDEV_LOG(ERR,
			"failed to get memory for qp meta data, nb_queues %u",
					nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			COMPRESSDEV_LOG(ERR,
			"failed to realloc qp meta data, nb_queues %u",
					nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
					sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

static int
rte_compressdev_queue_pairs_release(struct rte_compressdev *dev)
{
	uint16_t num_qps, i;
	int ret;

	if (dev == NULL) {
		COMPRESSDEV_LOG(ERR, "invalid param: dev %p", dev);
		return -EINVAL;
	}

	num_qps = dev->data->nb_queue_pairs;

	if (num_qps == 0)
		return 0;

	COMPRESSDEV_LOG(DEBUG, "Free %d queue pairs on device %u",
			dev->data->nb_queue_pairs, dev->data->dev_id);

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
			-ENOTSUP);

	for (i = 0; i < num_qps; i++) {
		ret = (*dev->dev_ops->queue_pair_release)(dev, i);
		if (ret < 0)
			return ret;
	}

	rte_free(dev->data->queue_pairs);
	dev->data->queue_pairs = NULL;
	dev->data->nb_queue_pairs = 0;

	return 0;
}
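
/*
 * Illustrative configuration sequence (a sketch only; error handling is
 * omitted and the field values are examples, not recommendations):
 *
 *	struct rte_compressdev_config cfg = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *		.max_nb_priv_xforms = 1,
 *		.max_nb_streams = 0,
 *	};
 *
 *	rte_compressdev_configure(dev_id, &cfg);
 *	rte_compressdev_queue_pair_setup(dev_id, 0, max_inflight_ops,
 *			cfg.socket_id);
 *	rte_compressdev_start(dev_id);
 *
 * The device must be stopped again before it can be reconfigured or closed.
 */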

int
rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
{
	struct rte_compressdev *dev;
	int diag;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_comp_devices[dev_id];

	if (dev->data->dev_started) {
		COMPRESSDEV_LOG(ERR,
			"device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_compressdev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		COMPRESSDEV_LOG(ERR,
			"dev%d rte_comp_dev_queue_pairs_config = %d",
			dev_id, diag);
		return diag;
	}

	return (*dev->dev_ops->dev_configure)(dev, config);
}

int
rte_compressdev_start(uint8_t dev_id)
{
	struct rte_compressdev *dev;
	int diag;

	COMPRESSDEV_LOG(DEBUG, "Start dev_id=%" PRIu8, dev_id);

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_comp_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		COMPRESSDEV_LOG(ERR,
			"Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_compressdev_stop(uint8_t dev_id)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_comp_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		COMPRESSDEV_LOG(ERR,
			"Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	dev->data->dev_started = 0;
}

int
rte_compressdev_close(uint8_t dev_id)
{
	struct rte_compressdev *dev;
	int retval;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_comp_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		COMPRESSDEV_LOG(ERR, "Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* Free queue pairs memory */
	retval = rte_compressdev_queue_pairs_release(dev);

	if (retval < 0)
		return retval;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_comp_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		COMPRESSDEV_LOG(ERR, "Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		COMPRESSDEV_LOG(ERR,
			"device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	if (max_inflight_ops == 0) {
		COMPRESSDEV_LOG(ERR,
			"Invalid maximum number of inflight operations");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id,
			max_inflight_ops, socket_id);
}
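
/*
 * Data-path note: the burst calls below are thin wrappers around per-device
 * function pointers and perform no argument validation, so the caller must
 * pass a valid dev_id and a queue pair that has been set up, and must not
 * use one queue pair from multiple threads concurrently. A minimal,
 * illustrative poll loop (ops obtained from an rte_comp_op pool) could be:
 *
 *	nb_enq = rte_compressdev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *	do {
 *		nb_deq = rte_compressdev_dequeue_burst(dev_id, qp_id,
 *				deq_ops, RTE_DIM(deq_ops));
 *	} while (nb_deq == 0);
 */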

uint16_t
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct rte_compressdev *dev = &rte_comp_devices[dev_id];

	nb_ops = (*dev->dequeue_burst)
			(dev->data->queue_pairs[qp_id], ops, nb_ops);

	return nb_ops;
}

uint16_t
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct rte_compressdev *dev = &rte_comp_devices[dev_id];

	return (*dev->enqueue_burst)(
			dev->data->queue_pairs[qp_id], ops, nb_ops);
}

int
rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		COMPRESSDEV_LOG(ERR, "Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_comp_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_compressdev_stats_reset(uint8_t dev_id)
{
	struct rte_compressdev *dev;

	if (!rte_compressdev_is_valid_dev(dev_id)) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_comp_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
{
	struct rte_compressdev *dev;

	if (dev_id >= compressdev_globals.nb_devs) {
		COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_comp_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_compressdev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
}

int
rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **priv_xform)
{
	struct rte_compressdev *dev;
	int ret;

	dev = rte_compressdev_get_dev(dev_id);

	if (xform == NULL || priv_xform == NULL || dev == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->private_xform_create, -ENOTSUP);
	ret = (*dev->dev_ops->private_xform_create)(dev, xform, priv_xform);
	if (ret < 0) {
		COMPRESSDEV_LOG(ERR,
			"dev_id %d failed to create private_xform: err=%d",
			dev_id, ret);
		return ret;
	}

	return 0;
}

int
rte_compressdev_private_xform_free(uint8_t dev_id, void *priv_xform)
{
	struct rte_compressdev *dev;
	int ret;

	dev = rte_compressdev_get_dev(dev_id);

	if (dev == NULL || priv_xform == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->private_xform_free, -ENOTSUP);
	ret = dev->dev_ops->private_xform_free(dev, priv_xform);
	if (ret < 0) {
		COMPRESSDEV_LOG(ERR,
			"dev_id %d failed to free private xform: err=%d",
			dev_id, ret);
		return ret;
	}

	return 0;
}
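
/*
 * Note: a private_xform (above) is meant for stateless operations and,
 * subject to the PMD advertising RTE_COMP_FF_SHAREABLE_PRIV_XFORM, may be
 * attached to many operations at once, whereas a stream (below) holds the
 * state of a stateful sequence of operations and is used by one operation
 * at a time. See the rte_compressdev.h doxygen for the authoritative rules.
 */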

int
rte_compressdev_stream_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **stream)
{
	struct rte_compressdev *dev;
	int ret;

	dev = rte_compressdev_get_dev(dev_id);

	if (xform == NULL || dev == NULL || stream == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stream_create, -ENOTSUP);
	ret = (*dev->dev_ops->stream_create)(dev, xform, stream);
	if (ret < 0) {
		COMPRESSDEV_LOG(ERR,
			"dev_id %d failed to create stream: err=%d",
			dev_id, ret);
		return ret;
	}

	return 0;
}

int
rte_compressdev_stream_free(uint8_t dev_id, void *stream)
{
	struct rte_compressdev *dev;
	int ret;

	dev = rte_compressdev_get_dev(dev_id);

	if (dev == NULL || stream == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stream_free, -ENOTSUP);
	ret = dev->dev_ops->stream_free(dev, stream);
	if (ret < 0) {
		COMPRESSDEV_LOG(ERR,
			"dev_id %d failed to free stream: err=%d",
			dev_id, ret);
		return ret;
	}

	return 0;
}

const char *
rte_compressdev_name_get(uint8_t dev_id)
{
	struct rte_compressdev *dev = rte_compressdev_get_dev(dev_id);

	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

RTE_LOG_REGISTER_DEFAULT(compressdev_logtype, NOTICE);