/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 * All rights reserved.
 */

#include <string.h>
#include <stdbool.h>
#include <rte_common.h>
#include <dev_driver.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_service_component.h>

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "eventdev_trace.h"
#include "rte_event_crypto_adapter.h"

#define BATCH_SIZE 32
#define DEFAULT_MAX_NB 128
#define CRYPTO_ADAPTER_NAME_LEN 32
#define CRYPTO_ADAPTER_MEM_NAME_LEN 32
#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100

#define CRYPTO_ADAPTER_OPS_BUFFER_SZ (BATCH_SIZE + BATCH_SIZE)
#define CRYPTO_ADAPTER_BUFFER_SZ 1024

/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
 * iterations of eca_crypto_adapter_enq_run()
 */
#define CRYPTO_ENQ_FLUSH_THRESHOLD 1024

struct crypto_ops_circular_buffer {
	/* index of head element in circular buffer */
	uint16_t head;
	/* index of tail element in circular buffer */
	uint16_t tail;
	/* number of elements in buffer */
	uint16_t count;
	/* size of circular buffer */
	uint16_t size;
	/* Pointer to hold rte_crypto_ops for batching */
	struct rte_crypto_op **op_buffer;
} __rte_cache_aligned;

struct event_crypto_adapter {
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Event port identifier */
	uint8_t event_port_id;
	/* Store event device's implicit release capability */
	uint8_t implicit_release_disabled;
	/* Flag to indicate backpressure at cryptodev
	 * Stop further dequeuing events from eventdev
	 */
	bool stop_enq_to_cryptodev;
	/* Max crypto ops processed in any service function invocation */
	uint32_t max_nb;
	/* Lock to serialize config updates with service function */
	rte_spinlock_t lock;
	/* Next crypto device to be processed */
	uint16_t next_cdev_id;
	/* Per crypto device structure */
	struct crypto_device_info *cdevs;
	/* Loop counter to flush crypto ops */
	uint16_t transmit_loop_count;
	/* Circular buffer for batching crypto ops to eventdev */
	struct crypto_ops_circular_buffer ebuf;
	/* Per instance stats structure */
	struct rte_event_crypto_adapter_stats crypto_stats;
	/* Configuration callback for rte_service configuration */
	rte_event_crypto_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* Set if default_cb is being used */
	int default_cb_arg;
	/* Service initialization state */
	uint8_t service_inited;
	/* Memory allocation name */
	char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
	/* Socket identifier cached from eventdev */
	int socket_id;
	/* Per adapter EAL service */
	uint32_t service_id;
	/* No. of queue pairs configured */
	uint16_t nb_qps;
	/* Adapter mode */
	enum rte_event_crypto_adapter_mode mode;
} __rte_cache_aligned;

/* Per crypto device information */
struct crypto_device_info {
	/* Pointer to cryptodev */
	struct rte_cryptodev *dev;
	/* Pointer to queue pair info */
	struct crypto_queue_pair_info *qpairs;
	/* Next queue pair to be processed */
	uint16_t next_queue_pair_id;
	/* Set to indicate cryptodev->eventdev packet
	 * transfer uses a hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set to indicate processing has been started */
	uint8_t dev_started;
	/* If num_qpairs > 0, the start callback will
	 * be invoked if not already invoked
	 */
	uint16_t num_qpairs;
} __rte_cache_aligned;

/* Per queue pair information */
struct crypto_queue_pair_info {
	/* Set to indicate queue pair is enabled */
	bool qp_enabled;
	/* Circular buffer for batching crypto ops to cdev */
	struct crypto_ops_circular_buffer cbuf;
} __rte_cache_aligned;

static struct event_crypto_adapter **event_crypto_adapter;

/* Macros to check for valid adapter */
#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
	if (!eca_valid_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \
		return retval; \
	} \
} while (0)

static inline int
eca_valid_id(uint8_t id)
{
	return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
}

static int
eca_init(void)
{
	const char *name = "crypto_adapter_array";
	const struct rte_memzone *mz;
	unsigned int sz;

	sz = sizeof(*event_crypto_adapter) *
	    RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
						 RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
					 PRId32, rte_errno);
			return -rte_errno;
		}
	}

	event_crypto_adapter = mz->addr;
	return 0;
}

static inline bool
eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer *bufp)
{
	return bufp->count >= BATCH_SIZE;
}

static inline bool
eca_circular_buffer_space_for_batch(struct crypto_ops_circular_buffer *bufp)
{
	return (bufp->size - bufp->count) >= BATCH_SIZE;
}

static inline void
eca_circular_buffer_free(struct crypto_ops_circular_buffer *bufp)
{
	rte_free(bufp->op_buffer);
}

static inline int
eca_circular_buffer_init(const char *name,
			 struct crypto_ops_circular_buffer *bufp,
			 uint16_t sz)
{
	bufp->op_buffer = rte_zmalloc(name,
				      sizeof(struct rte_crypto_op *) * sz,
				      0);
	if (bufp->op_buffer == NULL)
		return -ENOMEM;

	bufp->size = sz;
	return 0;
}

static inline int
eca_circular_buffer_add(struct crypto_ops_circular_buffer *bufp,
			struct rte_crypto_op *op)
{
	uint16_t *tailp = &bufp->tail;

	bufp->op_buffer[*tailp] = op;
	/* circular buffer, go round */
	*tailp = (*tailp + 1) % bufp->size;
	bufp->count++;

	return 0;
}
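
/* Drain buffered ops towards a cryptodev queue pair. A single call only
 * enqueues the contiguous region between head and either tail or the end
 * of the underlying array, so a wrapped buffer may need a further call
 * (on a later service iteration) to drain completely. Returns 0 when
 * everything attempted was accepted, -1 when the cryptodev took less.
 */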
static inline int
eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp,
				  uint8_t cdev_id, uint16_t qp_id,
				  uint16_t *nb_ops_flushed)
{
	uint16_t n = 0;
	uint16_t *headp = &bufp->head;
	uint16_t *tailp = &bufp->tail;
	struct rte_crypto_op **ops = bufp->op_buffer;

	if (*tailp > *headp)
		n = *tailp - *headp;
	else if (*tailp < *headp)
		n = bufp->size - *headp;
	else {
		*nb_ops_flushed = 0;
		return 0;  /* buffer empty */
	}

	*nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id,
						      &ops[*headp], n);
	bufp->count -= *nb_ops_flushed;
	if (!bufp->count) {
		*headp = 0;
		*tailp = 0;
	} else
		*headp = (*headp + *nb_ops_flushed) % bufp->size;

	return *nb_ops_flushed == n ? 0 : -1;
}

static inline struct event_crypto_adapter *
eca_id_to_adapter(uint8_t id)
{
	return event_crypto_adapter ?
		event_crypto_adapter[id] : NULL;
}

static int
eca_default_config_cb(uint8_t id, uint8_t dev_id,
			struct rte_event_crypto_adapter_conf *conf, void *arg)
{
	struct rte_event_dev_config dev_conf;
	struct rte_eventdev *dev;
	uint8_t port_id;
	int started;
	int ret;
	struct rte_event_port_conf *port_conf = arg;
	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);

	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);
	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id);
		if (started) {
			if (rte_event_dev_start(dev_id))
				return -EIO;
		}
		return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id);
		return ret;
	}

	conf->event_port_id = port_id;
	conf->max_nb = DEFAULT_MAX_NB;
	if (started)
		ret = rte_event_dev_start(dev_id);

	adapter->default_cb_arg = 1;
	return ret;
}
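
/* Illustrative call sequence only (not part of the library): a minimal
 * software-adapter setup, assuming event device 0, crypto device 0 and
 * example port configuration values chosen by the application.
 *
 *	struct rte_event_port_conf port_conf = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 16,
 *		.enqueue_depth = 16,
 *	};
 *
 *	rte_event_crypto_adapter_create(0, 0, &port_conf,
 *					RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
 *	rte_event_crypto_adapter_queue_pair_add(0, 0, -1, NULL);
 *	rte_event_crypto_adapter_start(0);
 */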
int
rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
				rte_event_crypto_adapter_conf_cb conf_cb,
				enum rte_event_crypto_adapter_mode mode,
				void *conf_arg)
{
	struct event_crypto_adapter *adapter;
	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
	struct rte_event_dev_info dev_info;
	int socket_id;
	uint8_t i;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (conf_cb == NULL)
		return -EINVAL;

	if (event_crypto_adapter == NULL) {
		ret = eca_init();
		if (ret)
			return ret;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter != NULL) {
		RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
		return -EEXIST;
	}

	socket_id = rte_event_dev_socket_id(dev_id);
	snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
		 "rte_event_crypto_adapter_%d", id);

	adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (adapter == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
		return -ENOMEM;
	}

	if (eca_circular_buffer_init("eca_edev_circular_buffer",
				     &adapter->ebuf,
				     CRYPTO_ADAPTER_BUFFER_SZ)) {
		RTE_EDEV_LOG_ERR("Failed to get memory for eventdev buffer");
		rte_free(adapter);
		return -ENOMEM;
	}

	ret = rte_event_dev_info_get(dev_id, &dev_info);
	if (ret < 0) {
		RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
				 dev_id, dev_info.driver_name);
		eca_circular_buffer_free(&adapter->ebuf);
		rte_free(adapter);
		return ret;
	}

	adapter->implicit_release_disabled = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
	adapter->eventdev_id = dev_id;
	adapter->socket_id = socket_id;
	adapter->conf_cb = conf_cb;
	adapter->conf_arg = conf_arg;
	adapter->mode = mode;
	strcpy(adapter->mem_name, mem_name);
	adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
					rte_cryptodev_count() *
					sizeof(struct crypto_device_info), 0,
					socket_id);
	if (adapter->cdevs == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n");
		eca_circular_buffer_free(&adapter->ebuf);
		rte_free(adapter);
		return -ENOMEM;
	}

	rte_spinlock_init(&adapter->lock);
	for (i = 0; i < rte_cryptodev_count(); i++)
		adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);

	event_crypto_adapter[id] = adapter;

	rte_eventdev_trace_crypto_adapter_create(id, dev_id, adapter, conf_arg,
		mode);
	return 0;
}

int
rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
				struct rte_event_port_conf *port_config,
				enum rte_event_crypto_adapter_mode mode)
{
	struct rte_event_port_conf *pc;
	int ret;

	if (port_config == NULL)
		return -EINVAL;
	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	pc = rte_malloc(NULL, sizeof(*pc), 0);
	if (pc == NULL)
		return -ENOMEM;
	*pc = *port_config;
	ret = rte_event_crypto_adapter_create_ext(id, dev_id,
						  eca_default_config_cb,
						  mode,
						  pc);
	if (ret)
		rte_free(pc);

	return ret;
}

int
rte_event_crypto_adapter_free(uint8_t id)
{
	struct event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	if (adapter->nb_qps) {
		RTE_EDEV_LOG_ERR("%" PRIu16 " queue pairs not deleted",
				 adapter->nb_qps);
		return -EBUSY;
	}

	rte_eventdev_trace_crypto_adapter_free(id, adapter);
	if (adapter->default_cb_arg)
		rte_free(adapter->conf_arg);
	rte_free(adapter->cdevs);
	rte_free(adapter);
	event_crypto_adapter[id] = NULL;

	return 0;
}
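
/* OP_FORWARD enqueue path: events dequeued from the adapter's event port
 * carry a crypto op in event_ptr. The op must provide request metadata
 * (rte_cryptodev_session_event_mdata_get()) naming the target cdev and
 * queue pair; ops without metadata, or destined to a queue pair that was
 * not added to the adapter, are freed and dropped.
 */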
static inline unsigned int
eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
		     unsigned int cnt)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	struct crypto_queue_pair_info *qp_info = NULL;
	struct rte_crypto_op *crypto_op;
	unsigned int i, n;
	uint16_t qp_id, nb_enqueued = 0;
	uint8_t cdev_id;
	int ret;

	ret = 0;
	n = 0;
	stats->event_deq_count += cnt;

	for (i = 0; i < cnt; i++) {
		crypto_op = ev[i].event_ptr;
		if (crypto_op == NULL)
			continue;
		m_data = rte_cryptodev_session_event_mdata_get(crypto_op);
		if (m_data == NULL) {
			rte_pktmbuf_free(crypto_op->sym->m_src);
			rte_crypto_op_free(crypto_op);
			continue;
		}

		cdev_id = m_data->request_info.cdev_id;
		qp_id = m_data->request_info.queue_pair_id;
		qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
		if (!qp_info->qp_enabled) {
			rte_pktmbuf_free(crypto_op->sym->m_src);
			rte_crypto_op_free(crypto_op);
			continue;
		}
		eca_circular_buffer_add(&qp_info->cbuf, crypto_op);

		if (eca_circular_buffer_batch_ready(&qp_info->cbuf)) {
			ret = eca_circular_buffer_flush_to_cdev(&qp_info->cbuf,
								cdev_id,
								qp_id,
								&nb_enqueued);
			/**
			 * If some crypto ops failed to flush to cdev and
			 * space for another batch is not available, stop
			 * dequeue from eventdev momentarily
			 */
			if (unlikely(ret < 0 &&
				!eca_circular_buffer_space_for_batch(
							&qp_info->cbuf)))
				adapter->stop_enq_to_cryptodev = true;
		}

		stats->crypto_enq_count += nb_enqueued;
		n += nb_enqueued;
	}

	return n;
}

static unsigned int
eca_crypto_cdev_flush(struct event_crypto_adapter *adapter,
		      uint8_t cdev_id, uint16_t *nb_ops_flushed)
{
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_cryptodev *dev;
	uint16_t nb = 0, nb_enqueued = 0;
	uint16_t qp;

	curr_dev = &adapter->cdevs[cdev_id];
	dev = rte_cryptodev_pmd_get_dev(cdev_id);

	for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {

		curr_queue = &curr_dev->qpairs[qp];
		if (unlikely(curr_queue == NULL || !curr_queue->qp_enabled))
			continue;

		eca_circular_buffer_flush_to_cdev(&curr_queue->cbuf,
						  cdev_id,
						  qp,
						  &nb_enqueued);
		*nb_ops_flushed += curr_queue->cbuf.count;
		nb += nb_enqueued;
	}

	return nb;
}

static unsigned int
eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	uint8_t cdev_id;
	uint16_t nb_enqueued = 0;
	uint16_t nb_ops_flushed = 0;
	uint16_t num_cdev = rte_cryptodev_count();

	for (cdev_id = 0; cdev_id < num_cdev; cdev_id++)
		nb_enqueued += eca_crypto_cdev_flush(adapter,
						     cdev_id,
						     &nb_ops_flushed);
	/**
	 * Enable dequeue from eventdev if all ops from circular
	 * buffer flushed to cdev
	 */
	if (!nb_ops_flushed)
		adapter->stop_enq_to_cryptodev = false;

	stats->crypto_enq_count += nb_enqueued;

	return nb_enqueued;
}

static int
eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
			   unsigned int max_enq)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct rte_event ev[BATCH_SIZE];
	unsigned int nb_enq, nb_enqueued;
	uint16_t n;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;

	nb_enqueued = 0;
	if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
		return 0;

	if (unlikely(adapter->stop_enq_to_cryptodev)) {
		nb_enqueued += eca_crypto_enq_flush(adapter);

		if (unlikely(adapter->stop_enq_to_cryptodev))
			goto skip_event_dequeue_burst;
	}

	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
		stats->event_poll_count++;
		n = rte_event_dequeue_burst(event_dev_id,
					    event_port_id, ev, BATCH_SIZE, 0);

		if (!n)
			break;

		nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
	}

skip_event_dequeue_burst:

	if ((++adapter->transmit_loop_count &
		(CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
		nb_enqueued += eca_crypto_enq_flush(adapter);
	}

	return nb_enqueued;
}
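
/* Build response events from each op's response_info metadata and enqueue
 * them to the adapter's event port. The event op type is FORWARD when the
 * eventdev reports RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE, otherwise
 * NEW; enqueue is retried up to CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES times.
 */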
static inline uint16_t
eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
		      struct rte_crypto_op **ops, uint16_t num)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;
	struct rte_event events[BATCH_SIZE];
	uint16_t nb_enqueued, nb_ev;
	uint8_t retry;
	uint8_t i;

	nb_ev = 0;
	retry = 0;
	nb_enqueued = 0;
	num = RTE_MIN(num, BATCH_SIZE);
	for (i = 0; i < num; i++) {
		struct rte_event *ev;

		m_data = rte_cryptodev_session_event_mdata_get(ops[i]);
		if (unlikely(m_data == NULL)) {
			rte_pktmbuf_free(ops[i]->sym->m_src);
			rte_crypto_op_free(ops[i]);
			continue;
		}

		/* Only claim an event slot once the op is known to carry
		 * response metadata, so dropped ops do not leave an
		 * uninitialized event in the burst.
		 */
		ev = &events[nb_ev++];
		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
		ev->event_ptr = ops[i];
		ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
		if (adapter->implicit_release_disabled)
			ev->op = RTE_EVENT_OP_FORWARD;
		else
			ev->op = RTE_EVENT_OP_NEW;
	}

	do {
		nb_enqueued += rte_event_enqueue_burst(event_dev_id,
						  event_port_id,
						  &events[nb_enqueued],
						  nb_ev - nb_enqueued);

	} while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
		 nb_enqueued < nb_ev);

	stats->event_enq_fail_count += nb_ev - nb_enqueued;
	stats->event_enq_count += nb_enqueued;
	stats->event_enq_retry_count += retry - 1;

	return nb_enqueued;
}

static int
eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter,
				   struct crypto_ops_circular_buffer *bufp)
{
	uint16_t n = 0, nb_ops_flushed;
	uint16_t *headp = &bufp->head;
	uint16_t *tailp = &bufp->tail;
	struct rte_crypto_op **ops = bufp->op_buffer;

	if (*tailp > *headp)
		n = *tailp - *headp;
	else if (*tailp < *headp)
		n = bufp->size - *headp;
	else
		return 0;  /* buffer empty */

	/* flush from the current head, not from the start of the buffer */
	nb_ops_flushed = eca_ops_enqueue_burst(adapter, &ops[*headp], n);
	bufp->count -= nb_ops_flushed;
	if (!bufp->count) {
		*headp = 0;
		*tailp = 0;
		return 0;  /* buffer empty */
	}

	*headp = (*headp + nb_ops_flushed) % bufp->size;
	return 1;
}

static void
eca_ops_buffer_flush(struct event_crypto_adapter *adapter)
{
	if (likely(adapter->ebuf.count == 0))
		return;

	while (eca_circular_buffer_flush_to_evdev(adapter,
						  &adapter->ebuf))
		;
}
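
/* Dequeue completed ops, round-robining over crypto devices and their
 * queue pairs starting from the position saved in the adapter. Ops that
 * cannot be enqueued to the eventdev immediately are parked in the
 * adapter's ebuf and flushed on the next invocation. The iteration
 * position is stored back once max_deq ops have been processed.
 */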
static inline unsigned int
eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
			   unsigned int max_deq)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_crypto_op *ops[BATCH_SIZE];
	uint16_t n, nb_deq, nb_enqueued, i;
	struct rte_cryptodev *dev;
	uint8_t cdev_id;
	uint16_t qp, dev_qps;
	bool done;
	uint16_t num_cdev = rte_cryptodev_count();

	nb_deq = 0;
	eca_ops_buffer_flush(adapter);

	do {
		done = true;

		for (cdev_id = adapter->next_cdev_id;
			cdev_id < num_cdev; cdev_id++) {
			uint16_t queues = 0;

			curr_dev = &adapter->cdevs[cdev_id];
			dev = curr_dev->dev;
			if (unlikely(dev == NULL))
				continue;

			dev_qps = dev->data->nb_queue_pairs;

			for (qp = curr_dev->next_queue_pair_id;
			    queues < dev_qps; qp = (qp + 1) % dev_qps,
			    queues++) {

				curr_queue = &curr_dev->qpairs[qp];
				if (unlikely(curr_queue == NULL ||
				    !curr_queue->qp_enabled))
					continue;

				n = rte_cryptodev_dequeue_burst(cdev_id, qp,
					ops, BATCH_SIZE);
				if (!n)
					continue;

				done = false;
				nb_enqueued = 0;

				stats->crypto_deq_count += n;

				if (unlikely(!adapter->ebuf.count))
					nb_enqueued = eca_ops_enqueue_burst(
							adapter, ops, n);

				if (likely(nb_enqueued == n))
					goto check;

				/* Failed to enqueue events case:
				 * buffer the remaining ops individually.
				 */
				for (i = nb_enqueued; i < n; i++)
					eca_circular_buffer_add(
						&adapter->ebuf,
						ops[i]);

check:
				nb_deq += n;

				if (nb_deq >= max_deq) {
					if ((qp + 1) == dev_qps) {
						adapter->next_cdev_id =
							(cdev_id + 1)
							% num_cdev;
					}
					curr_dev->next_queue_pair_id = (qp + 1)
						% dev->data->nb_queue_pairs;

					return nb_deq;
				}
			}
		}
		adapter->next_cdev_id = 0;
	} while (done == false);

	return nb_deq;
}

static void
eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
		       unsigned int max_ops)
{
	unsigned int ops_left = max_ops;

	while (ops_left > 0) {
		unsigned int e_cnt, d_cnt;

		e_cnt = eca_crypto_adapter_deq_run(adapter, ops_left);
		ops_left -= RTE_MIN(ops_left, e_cnt);

		d_cnt = eca_crypto_adapter_enq_run(adapter, ops_left);
		ops_left -= RTE_MIN(ops_left, d_cnt);

		if (e_cnt == 0 && d_cnt == 0)
			break;

	}

	if (ops_left == max_ops)
		rte_event_maintain(adapter->eventdev_id,
				   adapter->event_port_id, 0);
}

static int
eca_service_func(void *args)
{
	struct event_crypto_adapter *adapter = args;

	if (rte_spinlock_trylock(&adapter->lock) == 0)
		return 0;
	eca_crypto_adapter_run(adapter, adapter->max_nb);
	rte_spinlock_unlock(&adapter->lock);

	return 0;
}

static int
eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
{
	struct rte_event_crypto_adapter_conf adapter_conf;
	struct rte_service_spec service;
	int ret;

	if (adapter->service_inited)
		return 0;

	memset(&service, 0, sizeof(service));
	snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
		 "rte_event_crypto_adapter_%d", id);
	service.socket_id = adapter->socket_id;
	service.callback = eca_service_func;
	service.callback_userdata = adapter;
	/* Service function handles locking for queue add/del updates */
	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
	ret = rte_service_component_register(&service, &adapter->service_id);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
			service.name, ret);
		return ret;
	}

	ret = adapter->conf_cb(id, adapter->eventdev_id,
		&adapter_conf, adapter->conf_arg);
	if (ret) {
		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
			ret);
		return ret;
	}

	adapter->max_nb = adapter_conf.max_nb;
	adapter->event_port_id = adapter_conf.event_port_id;
	adapter->service_inited = 1;

	return ret;
}

static void
eca_update_qp_info(struct event_crypto_adapter *adapter,
		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
		   uint8_t add)
{
	struct crypto_queue_pair_info *qp_info;
	int enabled;
	uint16_t i;

	if (dev_info->qpairs == NULL)
		return;

	if (queue_pair_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
			eca_update_qp_info(adapter, dev_info, i, add);
	} else {
		qp_info = &dev_info->qpairs[queue_pair_id];
		enabled = qp_info->qp_enabled;
		if (add) {
			adapter->nb_qps += !enabled;
			dev_info->num_qpairs += !enabled;
		} else {
			adapter->nb_qps -= enabled;
			dev_info->num_qpairs -= enabled;
		}
		qp_info->qp_enabled = !!add;
	}
}
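
/* Allocate per-queue-pair state for a cryptodev handled by the SW service
 * on first use, then mark the requested queue pair (or all of them when
 * queue_pair_id is -1) as enabled for this adapter.
 */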
static int
eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
		   int queue_pair_id)
{
	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
	struct crypto_queue_pair_info *qpairs;
	uint32_t i;

	if (dev_info->qpairs == NULL) {
		dev_info->qpairs =
		    rte_zmalloc_socket(adapter->mem_name,
					dev_info->dev->data->nb_queue_pairs *
					sizeof(struct crypto_queue_pair_info),
					0, adapter->socket_id);
		if (dev_info->qpairs == NULL)
			return -ENOMEM;

		qpairs = dev_info->qpairs;

		if (eca_circular_buffer_init("eca_cdev_circular_buffer",
					     &qpairs->cbuf,
					     CRYPTO_ADAPTER_OPS_BUFFER_SZ)) {
			RTE_EDEV_LOG_ERR("Failed to get memory for cryptodev "
					 "buffer");
			rte_free(qpairs);
			return -ENOMEM;
		}
	}

	if (queue_pair_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
			eca_update_qp_info(adapter, dev_info, i, 1);
	} else
		eca_update_qp_info(adapter, dev_info,
					(uint16_t)queue_pair_id, 1);

	return 0;
}

int
rte_event_crypto_adapter_queue_pair_add(uint8_t id,
			uint8_t cdev_id,
			int32_t queue_pair_id,
			const struct rte_event_crypto_adapter_queue_conf *conf)
{
	struct rte_event_crypto_adapter_vector_limits limits;
	struct event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	uint32_t cap;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
		return -EINVAL;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
						cdev_id,
						&cap);
	if (ret) {
		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
				 " cdev %" PRIu8, id, cdev_id);
		return ret;
	}

	if (conf == NULL) {
		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
			RTE_EDEV_LOG_ERR("Conf value cannot be NULL for dev_id=%u",
					 cdev_id);
			return -EINVAL;
		}
	} else {
		if (conf->flags & RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR) {
			if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) == 0) {
				RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
						 " dev %" PRIu8 " cdev %" PRIu8, id,
						 cdev_id);
				return -ENOTSUP;
			}

			ret = rte_event_crypto_adapter_vector_limits_get(
				adapter->eventdev_id, cdev_id, &limits);
			if (ret < 0) {
				RTE_EDEV_LOG_ERR("Failed to get event device vector "
						 "limits, dev %" PRIu8 " cdev %" PRIu8,
						 id, cdev_id);
				return -EINVAL;
			}

			if (conf->vector_sz < limits.min_sz ||
			    conf->vector_sz > limits.max_sz ||
			    conf->vector_timeout_ns < limits.min_timeout_ns ||
			    conf->vector_timeout_ns > limits.max_timeout_ns ||
			    conf->vector_mp == NULL) {
				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
						 " dev %" PRIu8 " cdev %" PRIu8,
						 id, cdev_id);
				return -EINVAL;
			}

			if (conf->vector_mp->elt_size < (sizeof(struct rte_event_vector) +
					(sizeof(uintptr_t) * conf->vector_sz))) {
				RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
						 " dev %" PRIu8 " cdev %" PRIu8,
						 id, cdev_id);
				return -EINVAL;
			}
		}
	}

	dev_info = &adapter->cdevs[cdev_id];

	if (queue_pair_id != -1 &&
	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
				 (uint16_t)queue_pair_id);
		return -EINVAL;
	}

	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
	 * no service core is needed as the HW supports event forwarding.
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
		if (*dev->dev_ops->crypto_adapter_queue_pair_add == NULL)
			return -ENOTSUP;
		if (dev_info->qpairs == NULL) {
			dev_info->qpairs =
			    rte_zmalloc_socket(adapter->mem_name,
					dev_info->dev->data->nb_queue_pairs *
					sizeof(struct crypto_queue_pair_info),
					0, adapter->socket_id);
			if (dev_info->qpairs == NULL)
				return -ENOMEM;
		}

		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
				dev_info->dev,
				queue_pair_id,
				conf);
		if (ret)
			return ret;

		eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
				   queue_pair_id, 1);
	}

	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
	 * or this is a SW adapter, initiate the service so the application can
	 * choose whichever way it wants to use the adapter.
	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
	 *         The application may want to use one of the two modes below:
	 *          a. OP_FORWARD mode -> HW dequeue + SW enqueue
	 *          b. OP_NEW mode -> HW dequeue
	 * Case 2: No HW caps, use SW adapter
	 *          a. OP_FORWARD mode -> SW enqueue & dequeue
	 *          b. OP_NEW mode -> SW dequeue
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
	    (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
	     (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
		rte_spinlock_lock(&adapter->lock);
		ret = eca_init_service(adapter, id);
		if (ret == 0)
			ret = eca_add_queue_pair(adapter, cdev_id,
						 queue_pair_id);
		rte_spinlock_unlock(&adapter->lock);

		if (ret)
			return ret;

		rte_service_component_runstate_set(adapter->service_id, 1);
	}

	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id,
		queue_pair_id, conf);
	return 0;
}

int
rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
					int32_t queue_pair_id)
{
	struct event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	int ret;
	uint32_t cap;
	uint16_t i;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
		return -EINVAL;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
						cdev_id,
						&cap);
	if (ret)
		return ret;

	dev_info = &adapter->cdevs[cdev_id];

	if (queue_pair_id != -1 &&
	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
				 (uint16_t)queue_pair_id);
		return -EINVAL;
	}

	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
		if (*dev->dev_ops->crypto_adapter_queue_pair_del == NULL)
			return -ENOTSUP;
		ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
						dev_info->dev,
						queue_pair_id);
		if (ret == 0) {
			eca_update_qp_info(adapter,
					&adapter->cdevs[cdev_id],
					queue_pair_id,
					0);
			if (dev_info->num_qpairs == 0) {
				rte_free(dev_info->qpairs);
				dev_info->qpairs = NULL;
			}
		}
	} else {
		if (adapter->nb_qps == 0)
			return 0;

		rte_spinlock_lock(&adapter->lock);
		if (queue_pair_id == -1) {
			for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
				i++)
				eca_update_qp_info(adapter, dev_info,
						   i, 0);
		} else {
			eca_update_qp_info(adapter, dev_info,
					   (uint16_t)queue_pair_id, 0);
		}

		if (dev_info->num_qpairs == 0) {
			rte_free(dev_info->qpairs);
			dev_info->qpairs = NULL;
		}

		rte_spinlock_unlock(&adapter->lock);
		rte_service_component_runstate_set(adapter->service_id,
						   adapter->nb_qps);
	}

	rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
		queue_pair_id, ret);
	return ret;
}
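
/* Common start/stop handler: propagates start/stop to cryptodevs that use
 * an internal event port and toggles the adapter service run state when at
 * least one device relies on the SW service.
 */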
static int
eca_adapter_ctrl(uint8_t id, int start)
{
	struct event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	uint32_t i;
	int use_service;
	int stop = !start;

	use_service = 0;
	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];

	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		/* if start check for num queue pairs */
		if (start && !dev_info->num_qpairs)
			continue;
		/* if stop check if dev has been started */
		if (stop && !dev_info->dev_started)
			continue;
		use_service |= !dev_info->internal_event_port;
		dev_info->dev_started = start;
		if (dev_info->internal_event_port == 0)
			continue;
		start ? (*dev->dev_ops->crypto_adapter_start)(dev,
					dev_info->dev) :
			(*dev->dev_ops->crypto_adapter_stop)(dev,
					dev_info->dev);
	}

	if (use_service)
		rte_service_runstate_set(adapter->service_id, start);

	return 0;
}

int
rte_event_crypto_adapter_start(uint8_t id)
{
	struct event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	rte_eventdev_trace_crypto_adapter_start(id, adapter);
	return eca_adapter_ctrl(id, 1);
}

int
rte_event_crypto_adapter_stop(uint8_t id)
{
	rte_eventdev_trace_crypto_adapter_stop(id);
	return eca_adapter_ctrl(id, 0);
}

int
rte_event_crypto_adapter_stats_get(uint8_t id,
				struct rte_event_crypto_adapter_stats *stats)
{
	struct event_crypto_adapter *adapter;
	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
	struct rte_event_crypto_adapter_stats dev_stats;
	struct rte_eventdev *dev;
	struct crypto_device_info *dev_info;
	uint32_t i;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || stats == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	memset(stats, 0, sizeof(*stats));
	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		if (dev_info->internal_event_port == 0 ||
			dev->dev_ops->crypto_adapter_stats_get == NULL)
			continue;
		ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
						dev_info->dev,
						&dev_stats);
		if (ret)
			continue;

		dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
		dev_stats_sum.event_enq_count +=
			dev_stats.event_enq_count;
	}

	if (adapter->service_inited)
		*stats = adapter->crypto_stats;

	stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
	stats->event_enq_count += dev_stats_sum.event_enq_count;

	return 0;
}

int
rte_event_crypto_adapter_stats_reset(uint8_t id)
{
	struct event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	uint32_t i;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		if (dev_info->internal_event_port == 0 ||
			dev->dev_ops->crypto_adapter_stats_reset == NULL)
			continue;
		(*dev->dev_ops->crypto_adapter_stats_reset)(dev,
						dev_info->dev);
	}

	memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
	return 0;
}

int
rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
{
	struct event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || service_id == NULL)
		return -EINVAL;

	if (adapter->service_inited)
		*service_id = adapter->service_id;

	return adapter->service_inited ? 0 : -ESRCH;
}

int
rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
{
	struct event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || event_port_id == NULL)
		return -EINVAL;

	*event_port_id = adapter->event_port_id;

	return 0;
}

int
rte_event_crypto_adapter_vector_limits_get(
	uint8_t dev_id, uint16_t cdev_id,
	struct rte_event_crypto_adapter_vector_limits *limits)
{
	struct rte_cryptodev *cdev;
	struct rte_eventdev *dev;
	uint32_t cap;
	int ret;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu16, cdev_id);
		return -EINVAL;
	}

	if (limits == NULL) {
		RTE_EDEV_LOG_ERR("Invalid limits storage provided");
		return -EINVAL;
	}

	dev = &rte_eventdevs[dev_id];
	cdev = rte_cryptodev_pmd_get_dev(cdev_id);

	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
	if (ret) {
		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
				 " cdev %" PRIu16, dev_id, cdev_id);
		return ret;
	}

	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR)) {
		RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
				 " dev %" PRIu8 " cdev %" PRIu16,
				 dev_id, cdev_id);
		return -ENOTSUP;
	}

	if ((*dev->dev_ops->crypto_adapter_vector_limits_get) == NULL)
		return -ENOTSUP;

	return dev->dev_ops->crypto_adapter_vector_limits_get(
		dev, cdev, limits);
}