/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_dev.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>
#include <rte_interrupts.h>

#include "rte_bbdev_op.h"
#include "rte_bbdev.h"
#include "rte_bbdev_pmd.h"

#define DEV_NAME "BBDEV"


/* BBDev library logging ID */
RTE_LOG_REGISTER(bbdev_logtype, lib.bbdev, NOTICE);

/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, bbdev_logtype, fmt "\n", ##__VA_ARGS__)

#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

/* Helper macro to check dev_id is valid */
#define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \
	if (dev == NULL) { \
		rte_bbdev_log(ERR, "device %u is invalid", dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check dev_ops is valid */
#define VALID_DEV_OPS_OR_RET_ERR(dev, dev_id) do { \
	if (dev->dev_ops == NULL) { \
		rte_bbdev_log(ERR, "NULL dev_ops structure in device %u", \
				dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check that driver implements required function pointer */
#define VALID_FUNC_OR_RET_ERR(func, dev_id) do { \
	if (func == NULL) { \
		rte_bbdev_log(ERR, "device %u does not support %s", \
				dev_id, #func); \
		return -ENOTSUP; \
	} \
} while (0)

/* Helper macro to check that queue is valid */
#define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \
	if (queue_id >= dev->data->num_queues) { \
		rte_bbdev_log(ERR, "Invalid queue_id %u for device %u", \
				queue_id, dev->data->dev_id); \
		return -ERANGE; \
	} \
} while (0)

/* List of callback functions registered by an application */
struct rte_bbdev_callback {
	TAILQ_ENTRY(rte_bbdev_callback) next;  /* Callbacks list */
	rte_bbdev_cb_fn cb_fn;  /* Callback address */
	void *cb_arg;  /* Parameter for callback */
	void *ret_param;  /* Return parameter */
	enum rte_bbdev_event_type event;  /* Interrupt event type */
	uint32_t active;  /* Callback is executing */
};

/* spinlock for bbdev device callbacks */
static rte_spinlock_t rte_bbdev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/*
 * Global array of all devices. This is not static because it's used by the
 * inline enqueue and dequeue functions
 */
struct rte_bbdev rte_bbdev_devices[RTE_BBDEV_MAX_DEVS];
/* Global array with rte_bbdev_data structures */
static struct rte_bbdev_data *rte_bbdev_data;

/* Memzone name for global bbdev data pool */
static const char *MZ_RTE_BBDEV_DATA = "rte_bbdev_data";

/* Number of currently valid devices */
static uint16_t num_devs;

/* Return pointer to device structure, with validity check */
static struct rte_bbdev *
get_dev(uint16_t dev_id)
{
	if (rte_bbdev_is_valid(dev_id))
		return &rte_bbdev_devices[dev_id];
	return NULL;
}

/* Allocate global data array */
static int
rte_bbdev_data_alloc(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_BBDEV_DATA,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data),
				rte_socket_id(), flags);
	} else
		mz = rte_memzone_lookup(MZ_RTE_BBDEV_DATA);
	if (mz == NULL) {
		rte_bbdev_log(CRIT,
				"Cannot allocate memzone for bbdev port data");
		return -ENOMEM;
	}

	rte_bbdev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_bbdev_data, 0,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data));
	return 0;
}

/*
 * Find data allocated for the device or, if not found, return the first
 * unused bbdev data. If all structures are in use and none is used by the
 * device, return NULL.
 */
static struct rte_bbdev_data *
find_bbdev_data(const char *name)
{
	uint16_t data_id;

	for (data_id = 0; data_id < RTE_BBDEV_MAX_DEVS; ++data_id) {
		if (strlen(rte_bbdev_data[data_id].name) == 0) {
			memset(&rte_bbdev_data[data_id], 0,
					sizeof(struct rte_bbdev_data));
			return &rte_bbdev_data[data_id];
		} else if (strncmp(rte_bbdev_data[data_id].name, name,
				RTE_BBDEV_NAME_MAX_LEN) == 0)
			return &rte_bbdev_data[data_id];
	}

	return NULL;
}

/* Find lowest device id with no attached device */
static uint16_t
find_free_dev_id(void)
{
	uint16_t i;
	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		if (rte_bbdev_devices[i].state == RTE_BBDEV_UNUSED)
			return i;
	}
	return RTE_BBDEV_MAX_DEVS;
}

struct rte_bbdev *
rte_bbdev_allocate(const char *name)
{
	int ret;
	struct rte_bbdev *bbdev;
	uint16_t dev_id;

	if (name == NULL) {
		rte_bbdev_log(ERR, "Invalid null device name");
		return NULL;
	}

	if (rte_bbdev_get_named_dev(name) != NULL) {
		rte_bbdev_log(ERR, "Device \"%s\" is already allocated", name);
		return NULL;
	}

	dev_id = find_free_dev_id();
	if (dev_id == RTE_BBDEV_MAX_DEVS) {
		rte_bbdev_log(ERR, "Reached maximum number of devices");
		return NULL;
	}

	bbdev = &rte_bbdev_devices[dev_id];

	if (rte_bbdev_data == NULL) {
		ret = rte_bbdev_data_alloc();
		if (ret != 0)
			return NULL;
	}

	bbdev->data = find_bbdev_data(name);
	if (bbdev->data == NULL) {
		rte_bbdev_log(ERR,
				"Max BBDevs already allocated in multi-process environment!");
		return NULL;
	}

	__atomic_add_fetch(&bbdev->data->process_cnt, 1, __ATOMIC_RELAXED);
	bbdev->data->dev_id = dev_id;
	bbdev->state = RTE_BBDEV_INITIALIZED;

	ret = snprintf(bbdev->data->name, RTE_BBDEV_NAME_MAX_LEN, "%s", name);
	if ((ret < 0) || (ret >= RTE_BBDEV_NAME_MAX_LEN)) {
		rte_bbdev_log(ERR, "Copying device name \"%s\" failed", name);
		return NULL;
	}

	/* init user callbacks */
	TAILQ_INIT(&(bbdev->list_cbs));

	num_devs++;

	rte_bbdev_log_debug("Initialised device %s (id = %u). Num devices = %u",
			name, dev_id, num_devs);

	return bbdev;
}
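/*
 * Usage sketch (illustrative, not part of this file): a PMD pairs
 * rte_bbdev_allocate() in its probe hook with rte_bbdev_release() in its
 * remove hook. The driver function and ops table below are hypothetical.
 *
 *	static int
 *	my_bbdev_probe(struct rte_device *rte_dev)
 *	{
 *		struct rte_bbdev *bbdev = rte_bbdev_allocate(rte_dev->name);
 *
 *		if (bbdev == NULL)
 *			return -ENODEV;
 *		bbdev->dev_ops = &my_bbdev_ops; (hypothetical ops table)
 *		bbdev->device = rte_dev;
 *		return 0;
 *	}
 */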
int
rte_bbdev_release(struct rte_bbdev *bbdev)
{
	uint16_t dev_id;
	struct rte_bbdev_callback *cb, *next;

	if (bbdev == NULL) {
		rte_bbdev_log(ERR, "NULL bbdev");
		return -ENODEV;
	}
	dev_id = bbdev->data->dev_id;

	/* free all callbacks from the device's list */
	for (cb = TAILQ_FIRST(&bbdev->list_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);
		TAILQ_REMOVE(&(bbdev->list_cbs), cb, next);
		rte_free(cb);
	}

	/* clear shared BBDev Data if no process is using the device anymore */
	if (__atomic_sub_fetch(&bbdev->data->process_cnt, 1,
			__ATOMIC_RELAXED) == 0)
		memset(bbdev->data, 0, sizeof(*bbdev->data));

	memset(bbdev, 0, sizeof(*bbdev));
	num_devs--;
	bbdev->state = RTE_BBDEV_UNUSED;

	rte_bbdev_log_debug(
			"Un-initialised device id = %u. Num devices = %u",
			dev_id, num_devs);
	return 0;
}

struct rte_bbdev *
rte_bbdev_get_named_dev(const char *name)
{
	unsigned int i;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL driver name");
		return NULL;
	}

	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		struct rte_bbdev *dev = get_dev(i);
		if (dev && (strncmp(dev->data->name,
				name, RTE_BBDEV_NAME_MAX_LEN) == 0))
			return dev;
	}

	return NULL;
}

uint16_t
rte_bbdev_count(void)
{
	return num_devs;
}

bool
rte_bbdev_is_valid(uint16_t dev_id)
{
	if ((dev_id < RTE_BBDEV_MAX_DEVS) &&
		rte_bbdev_devices[dev_id].state == RTE_BBDEV_INITIALIZED)
		return true;
	return false;
}

uint16_t
rte_bbdev_find_next(uint16_t dev_id)
{
	dev_id++;
	for (; dev_id < RTE_BBDEV_MAX_DEVS; dev_id++)
		if (rte_bbdev_is_valid(dev_id))
			break;
	return dev_id;
}

int
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
{
	unsigned int i;
	int ret;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	/* Get device driver information to get max number of queues */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	if ((num_queues == 0) || (num_queues > dev_info.max_num_queues)) {
		rte_bbdev_log(ERR,
				"Device %u supports 0 < N <= %u queues, not %u",
				dev_id, dev_info.max_num_queues, num_queues);
		return -EINVAL;
	}

	/* If re-configuration, get driver to free existing internal memory */
	if (dev->data->queues != NULL) {
		VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
		for (i = 0; i < dev->data->num_queues; i++) {
			int ret = dev->dev_ops->queue_release(dev, i);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u queue %u release failed",
						dev_id, i);
				return ret;
			}
		}
		/* Call optional device close */
		if (dev->dev_ops->close) {
			ret = dev->dev_ops->close(dev);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u couldn't be closed",
						dev_id);
				return ret;
			}
		}
		rte_free(dev->data->queues);
	}

	/* Allocate queue pointers */
	dev->data->queues = rte_calloc_socket(DEV_NAME, num_queues,
			sizeof(dev->data->queues[0]), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (dev->data->queues == NULL) {
		rte_bbdev_log(ERR,
				"calloc of %u queues for device %u on socket %i failed",
				num_queues, dev_id, dev->data->socket_id);
		return -ENOMEM;
	}

	dev->data->num_queues = num_queues;

	/* Call optional device configuration */
	if (dev->dev_ops->setup_queues) {
		ret = dev->dev_ops->setup_queues(dev, num_queues, socket_id);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u memory configuration failed",
					dev_id);
			goto error;
		}
	}

	rte_bbdev_log_debug("Device %u set up with %u queues", dev_id,
			num_queues);
	return 0;

error:
	dev->data->num_queues = 0;
	rte_free(dev->data->queues);
	dev->data->queues = NULL;
	return ret;
}
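/*
 * Usage sketch (illustrative): queues can only be set up while the device is
 * stopped, and the requested count must not exceed the max_num_queues
 * reported in the driver info.
 *
 *	struct rte_bbdev_info info;
 *	uint16_t num_queues = 4; (example value)
 *
 *	rte_bbdev_info_get(dev_id, &info);
 *	if (num_queues > info.drv.max_num_queues)
 *		num_queues = info.drv.max_num_queues;
 *	ret = rte_bbdev_setup_queues(dev_id, num_queues, rte_socket_id());
 */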
int
rte_bbdev_intr_enable(uint16_t dev_id)
{
	int ret;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	if (dev->dev_ops->intr_enable) {
		ret = dev->dev_ops->intr_enable(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u interrupts configuration failed",
					dev_id);
			return ret;
		}
		rte_bbdev_log_debug("Enabled interrupts for dev %u", dev_id);
		return 0;
	}

	rte_bbdev_log(ERR, "Device %u doesn't support interrupts", dev_id);
	return -ENOTSUP;
}

int
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf)
{
	int ret = 0;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	const struct rte_bbdev_op_cap *p;
	struct rte_bbdev_queue_conf *stored_conf;
	const char *op_type_str;
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started || dev->data->started) {
		rte_bbdev_log(ERR,
				"Queue %u of device %u cannot be configured when started",
				queue_id, dev_id);
		return -EBUSY;
	}

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_setup, dev_id);

	/* Get device driver information to verify config is valid */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	/* Check configuration is valid */
	if (conf != NULL) {
		if ((conf->op_type == RTE_BBDEV_OP_NONE) &&
				(dev_info.capabilities[0].type ==
				RTE_BBDEV_OP_NONE)) {
			ret = 1;
		} else {
			for (p = dev_info.capabilities;
					p->type != RTE_BBDEV_OP_NONE; p++) {
				if (conf->op_type == p->type) {
					ret = 1;
					break;
				}
			}
		}
		if (ret == 0) {
			rte_bbdev_log(ERR, "Invalid operation type");
			return -EINVAL;
		}
		if (conf->queue_size > dev_info.queue_size_lim) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be: <= %u",
					conf->queue_size, queue_id, dev_id,
					dev_info.queue_size_lim);
			return -EINVAL;
		}
		if (!rte_is_power_of_2(conf->queue_size)) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be a power of 2",
					conf->queue_size, queue_id, dev_id);
			return -EINVAL;
		}
		if (conf->op_type == RTE_BBDEV_OP_TURBO_DEC &&
				conf->priority > dev_info.max_ul_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of bbdev %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_ul_queue_priority);
			return -EINVAL;
		}
		if (conf->op_type == RTE_BBDEV_OP_TURBO_ENC &&
				conf->priority > dev_info.max_dl_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of bbdev %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_dl_queue_priority);
			return -EINVAL;
		}
	}

	/* Release existing queue (in case of queue reconfiguration) */
	if (dev->data->queues[queue_id].queue_private != NULL) {
		ret = dev->dev_ops->queue_release(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, queue_id);
			return ret;
		}
	}

	/* Get driver to setup the queue */
	ret = dev->dev_ops->queue_setup(dev, queue_id, (conf != NULL) ?
			conf : &dev_info.default_queue_conf);
	if (ret < 0) {
		rte_bbdev_log(ERR,
				"Device %u queue %u setup failed", dev_id,
				queue_id);
		return ret;
	}

	/* Store configuration */
	stored_conf = &dev->data->queues[queue_id].conf;
	memcpy(stored_conf,
			(conf != NULL) ? conf : &dev_info.default_queue_conf,
			sizeof(*stored_conf));

	op_type_str = rte_bbdev_op_type_str(stored_conf->op_type);
	if (op_type_str == NULL)
		return -EINVAL;

	rte_bbdev_log_debug("Configured dev%uq%u (size=%u, type=%s, prio=%u)",
			dev_id, queue_id, stored_conf->queue_size, op_type_str,
			stored_conf->priority);

	return 0;
}
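/*
 * Usage sketch (illustrative): configure queue 0 for turbo decode with
 * example values; passing conf == NULL instead applies the driver's
 * default_queue_conf.
 *
 *	struct rte_bbdev_queue_conf qconf = {
 *		.socket = (int)rte_socket_id(),
 *		.queue_size = 512, (power of 2, <= info.drv.queue_size_lim)
 *		.priority = 0,
 *		.deferred_start = false,
 *		.op_type = RTE_BBDEV_OP_TURBO_DEC,
 *	};
 *	ret = rte_bbdev_queue_configure(dev_id, 0, &qconf);
 */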
int
rte_bbdev_start(uint16_t dev_id)
{
	int i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log_debug("Device %u is already started", dev_id);
		return 0;
	}

	if (dev->dev_ops->start) {
		int ret = dev->dev_ops->start(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u start failed", dev_id);
			return ret;
		}
	}

	/* Store new state */
	for (i = 0; i < dev->data->num_queues; i++)
		if (!dev->data->queues[i].conf.deferred_start)
			dev->data->queues[i].started = true;
	dev->data->started = true;

	rte_bbdev_log_debug("Started device %u", dev_id);
	return 0;
}

int
rte_bbdev_stop(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (!dev->data->started) {
		rte_bbdev_log_debug("Device %u is already stopped", dev_id);
		return 0;
	}

	if (dev->dev_ops->stop)
		dev->dev_ops->stop(dev);
	dev->data->started = false;

	rte_bbdev_log_debug("Stopped device %u", dev_id);
	return 0;
}

int
rte_bbdev_close(uint16_t dev_id)
{
	int ret;
	uint16_t i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		ret = rte_bbdev_stop(dev_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u stop failed", dev_id);
			return ret;
		}
	}

	/* Free memory used by queues */
	for (i = 0; i < dev->data->num_queues; i++) {
		ret = dev->dev_ops->queue_release(dev, i);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, i);
			return ret;
		}
	}
	rte_free(dev->data->queues);

	if (dev->dev_ops->close) {
		ret = dev->dev_ops->close(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u close failed", dev_id);
			return ret;
		}
	}

	/* Clear configuration */
	dev->data->queues = NULL;
	dev->data->num_queues = 0;

	rte_bbdev_log_debug("Closed device %u", dev_id);
	return 0;
}
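/*
 * Lifecycle sketch (illustrative): once queues are configured, the device is
 * started, used for enqueue/dequeue, stopped, and finally closed; close also
 * releases all queue memory.
 *
 *	ret = rte_bbdev_start(dev_id);
 *	... enqueue/dequeue on the started queues ...
 *	ret = rte_bbdev_stop(dev_id);
 *	ret = rte_bbdev_close(dev_id);
 */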
int
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already started",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_start) {
		int ret = dev->dev_ops->queue_start(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u start failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = true;

	rte_bbdev_log_debug("Started queue %u of device %u", queue_id, dev_id);
	return 0;
}

int
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (!dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already stopped",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_stop) {
		int ret = dev->dev_ops->queue_stop(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u stop failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = false;

	rte_bbdev_log_debug("Stopped queue %u of device %u", queue_id, dev_id);
	return 0;
}

/* Get device statistics */
static void
get_stats_from_queues(struct rte_bbdev *dev, struct rte_bbdev_stats *stats)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		stats->enqueued_count += q_stats->enqueued_count;
		stats->dequeued_count += q_stats->dequeued_count;
		stats->enqueue_err_count += q_stats->enqueue_err_count;
		stats->dequeue_err_count += q_stats->dequeue_err_count;
	}
	rte_bbdev_log_debug("Got stats on %u", dev->data->dev_id);
}

static void
reset_stats_in_queues(struct rte_bbdev *dev)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		memset(q_stats, 0, sizeof(*q_stats));
	}
	rte_bbdev_log_debug("Reset stats on %u", dev->data->dev_id);
}

int
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (stats == NULL) {
		rte_bbdev_log(ERR, "NULL stats structure");
		return -EINVAL;
	}

	memset(stats, 0, sizeof(*stats));
	if (dev->dev_ops->stats_get != NULL)
		dev->dev_ops->stats_get(dev, stats);
	else
		get_stats_from_queues(dev, stats);

	rte_bbdev_log_debug("Retrieved stats of device %u", dev_id);
	return 0;
}
int
rte_bbdev_stats_reset(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->dev_ops->stats_reset != NULL)
		dev->dev_ops->stats_reset(dev);
	else
		reset_stats_in_queues(dev);

	rte_bbdev_log_debug("Reset stats of device %u", dev_id);
	return 0;
}

int
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);

	if (dev_info == NULL) {
		rte_bbdev_log(ERR, "NULL dev info structure");
		return -EINVAL;
	}

	/* Copy data maintained by device interface layer */
	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->dev_name = dev->data->name;
	dev_info->num_queues = dev->data->num_queues;
	dev_info->device = dev->device;
	dev_info->socket_id = dev->data->socket_id;
	dev_info->started = dev->data->started;

	/* Copy data maintained by device driver layer */
	dev->dev_ops->info_get(dev, &dev_info->drv);

	rte_bbdev_log_debug("Retrieved info of device %u", dev_id);
	return 0;
}

int
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (queue_info == NULL) {
		rte_bbdev_log(ERR, "NULL queue info structure");
		return -EINVAL;
	}

	/* Copy data to output */
	memset(queue_info, 0, sizeof(*queue_info));
	queue_info->conf = dev->data->queues[queue_id].conf;
	queue_info->started = dev->data->queues[queue_id].started;

	rte_bbdev_log_debug("Retrieved info of queue %u of device %u",
			queue_id, dev_id);
	return 0;
}

/* Calculate size needed to store bbdev_op, depending on type */
static unsigned int
get_bbdev_op_size(enum rte_bbdev_op_type type)
{
	unsigned int result = 0;
	switch (type) {
	case RTE_BBDEV_OP_NONE:
		result = RTE_MAX(sizeof(struct rte_bbdev_dec_op),
				sizeof(struct rte_bbdev_enc_op));
		break;
	case RTE_BBDEV_OP_TURBO_DEC:
		result = sizeof(struct rte_bbdev_dec_op);
		break;
	case RTE_BBDEV_OP_TURBO_ENC:
		result = sizeof(struct rte_bbdev_enc_op);
		break;
	case RTE_BBDEV_OP_LDPC_DEC:
		result = sizeof(struct rte_bbdev_dec_op);
		break;
	case RTE_BBDEV_OP_LDPC_ENC:
		result = sizeof(struct rte_bbdev_enc_op);
		break;
	default:
		break;
	}

	return result;
}

/* Initialise a bbdev_op structure */
static void
bbdev_op_init(struct rte_mempool *mempool, void *arg, void *element,
		__rte_unused unsigned int n)
{
	enum rte_bbdev_op_type type = *(enum rte_bbdev_op_type *)arg;

	if (type == RTE_BBDEV_OP_TURBO_DEC || type == RTE_BBDEV_OP_LDPC_DEC) {
		struct rte_bbdev_dec_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	} else if (type == RTE_BBDEV_OP_TURBO_ENC ||
			type == RTE_BBDEV_OP_LDPC_ENC) {
		struct rte_bbdev_enc_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	}
}

struct rte_mempool *
rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
		unsigned int num_elements, unsigned int cache_size,
		int socket_id)
{
	struct rte_bbdev_op_pool_private *priv;
	struct rte_mempool *mp;
	const char *op_type_str;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL name for op pool");
		return NULL;
	}

	if (type >= RTE_BBDEV_OP_TYPE_COUNT) {
		rte_bbdev_log(ERR,
				"Invalid op type (%u), should be less than %u",
				type, RTE_BBDEV_OP_TYPE_COUNT);
		return NULL;
	}

	mp = rte_mempool_create(name, num_elements, get_bbdev_op_size(type),
			cache_size, sizeof(struct rte_bbdev_op_pool_private),
			NULL, NULL, bbdev_op_init, &type, socket_id, 0);
	if (mp == NULL) {
		rte_bbdev_log(ERR,
				"Failed to create op pool %s (num ops=%u, op size=%u) with error: %s",
				name, num_elements, get_bbdev_op_size(type),
				rte_strerror(rte_errno));
		return NULL;
	}

	op_type_str = rte_bbdev_op_type_str(type);
	if (op_type_str == NULL)
		return NULL;

	rte_bbdev_log_debug(
			"Op pool %s created for %u ops (type=%s, cache=%u, socket=%u, size=%u)",
			name, num_elements, op_type_str, cache_size, socket_id,
			get_bbdev_op_size(type));

	priv = (struct rte_bbdev_op_pool_private *)rte_mempool_get_priv(mp);
	priv->type = type;

	return mp;
}
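/*
 * Usage sketch (illustrative): create a pool of decode operations; the pool
 * name and sizes are arbitrary example values.
 *
 *	struct rte_mempool *ops_pool = rte_bbdev_op_pool_create(
 *			"bbdev_op_pool_dec", RTE_BBDEV_OP_TURBO_DEC,
 *			4096, 128, rte_socket_id());
 */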
int
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_bbdev_callback *user_cb;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR, "NULL callback function");
		return -EINVAL;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->list_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event)
			break;
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_bbdev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->list_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
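/*
 * Usage sketch (illustrative): register a handler for error events; the
 * handler name is hypothetical, its signature matches rte_bbdev_cb_fn.
 *
 *	static void
 *	my_err_cb(uint16_t dev_id, enum rte_bbdev_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		... react to the event ...
 *	}
 *
 *	ret = rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR,
 *			my_err_cb, NULL);
 */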
int
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	int ret = 0;
	struct rte_bbdev_callback *cb, *next;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR,
				"NULL callback function cannot be unregistered");
		return -EINVAL;
	}

	dev = &rte_bbdev_devices[dev_id];
	rte_spinlock_lock(&rte_bbdev_cb_lock);

	for (cb = TAILQ_FIRST(&dev->list_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
			continue;

		/* If this callback is not executing right now, remove it. */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->list_cbs), cb, next);
			rte_free(cb);
		} else
			ret = -EAGAIN;
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return ret;
}

void
rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
		enum rte_bbdev_event_type event, void *ret_param)
{
	struct rte_bbdev_callback *cb_lst;
	struct rte_bbdev_callback dev_cb;

	if (dev == NULL) {
		rte_bbdev_log(ERR, "NULL device");
		return;
	}

	if (dev->data == NULL) {
		rte_bbdev_log(ERR, "NULL data structure");
		return;
	}

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->list_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&rte_bbdev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&rte_bbdev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_bbdev_cb_lock);
}

int
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_enable, dev_id);
	return dev->dev_ops->queue_intr_enable(dev, queue_id);
}

int
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_disable, dev_id);
	return dev->dev_ops->queue_intr_disable(dev, queue_id);
}

int
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data)
{
	uint32_t vec;
	struct rte_bbdev *dev = get_dev(dev_id);
	struct rte_intr_handle *intr_handle;
	int ret;

	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	intr_handle = dev->intr_handle;
	if (!intr_handle || !intr_handle->intr_vec) {
		rte_bbdev_log(ERR, "Device %u intr handle unset", dev_id);
		return -ENOTSUP;
	}

	if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) {
		rte_bbdev_log(ERR, "Device %u queue_id %u is too big",
				dev_id, queue_id);
		return -ENOTSUP;
	}

	vec = intr_handle->intr_vec[queue_id];
	ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (ret && (ret != -EEXIST)) {
		rte_bbdev_log(ERR,
				"dev %u q %u int ctl error op %d epfd %d vec %u",
				dev_id, queue_id, op, epfd, vec);
		return ret;
	}

	return 0;
}
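/*
 * Usage sketch (illustrative): add a queue interrupt to an epoll set after
 * enabling per-queue interrupts; epfd creation and the event loop are elided.
 *
 *	rte_bbdev_queue_intr_enable(dev_id, queue_id);
 *	ret = rte_bbdev_queue_intr_ctl(dev_id, queue_id, epfd,
 *			RTE_INTR_EVENT_ADD, NULL);
 */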
const char *
rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
{
	static const char * const op_types[] = {
		"RTE_BBDEV_OP_NONE",
		"RTE_BBDEV_OP_TURBO_DEC",
		"RTE_BBDEV_OP_TURBO_ENC",
		"RTE_BBDEV_OP_LDPC_DEC",
		"RTE_BBDEV_OP_LDPC_ENC",
	};

	if (op_type < RTE_BBDEV_OP_TYPE_COUNT)
		return op_types[op_type];

	rte_bbdev_log(ERR, "Invalid operation type");
	return NULL;
}