/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017-2019 NXP
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <sys/select.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <dev_driver.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_pci.h>
#include <rte_eventdev.h>
#include <eventdev_pmd_vdev.h>
#include <rte_ethdev.h>
#include <rte_event_crypto_adapter.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <cryptodev_pmd.h>
#include <bus_dpaa_driver.h>
#include <rte_dpaa_logs.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <dpaa_ethdev.h>
#include <dpaa_sec_event.h>
#include "dpaa_eventdev.h"
#include <dpaa_mempool.h>

/*
 * Clarifications
 * Eventdev = Virtual Instance for SoC
 * Eventport = Portal Instance
 * Eventqueue = Channel Instance
 * 1 Eventdev can have N Eventqueues
 */
RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_eventdev, NOTICE);

#define DISABLE_INTR_MODE "disable_intr"

static int
dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	uint64_t cycles_per_second;

	cycles_per_second = rte_get_timer_hz();
	*timeout_ticks = (ns * cycles_per_second) / NS_PER_S;

	return 0;
}

static int
dpaa_event_dequeue_timeout_ticks_intr(struct rte_eventdev *dev, uint64_t ns,
				      uint64_t *timeout_ticks)
{
	RTE_SET_USED(dev);

	/* In interrupt mode the timeout is kept in microseconds */
	*timeout_ticks = ns/1000;
	return 0;
}

static void
dpaa_eventq_portal_add(u16 ch_id)
{
	uint32_t sdqcr;

	sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(ch_id);
	qman_static_dequeue_add(sdqcr, NULL);
}

static uint16_t
dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
			 uint16_t nb_events)
{
	uint16_t i;
	struct rte_mbuf *mbuf;

	RTE_SET_USED(port);
	/* Release all the contexts saved previously */
	for (i = 0; i < nb_events; i++) {
		switch (ev[i].op) {
		case RTE_EVENT_OP_RELEASE:
			qman_dca_index(ev[i].impl_opaque, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
			break;
		default:
			break;
		}
	}

	return nb_events;
}

static uint16_t
dpaa_event_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa_event_enqueue_burst(port, ev, 1);
}

static void drain_4_bytes(int fd, fd_set *fdset)
{
	if (FD_ISSET(fd, fdset)) {
		/* drain 4 bytes */
		uint32_t junk;
		ssize_t sjunk = read(qman_thread_fd(), &junk, sizeof(junk));

		if (sjunk != sizeof(junk))
			DPAA_EVENTDEV_ERR("UIO irq read error");
	}
}

/* Sleep in IRQ mode until DQRR activity or until the timeout (us) expires */
static inline int
dpaa_event_dequeue_wait(uint64_t timeout_ticks)
{
	int fd_qman, nfds;
	int ret;
	fd_set readset;

	/* Go into (and back out of) IRQ mode for each select,
	 * it simplifies exit-path considerations and other
	 * potential nastiness.
	 */
	struct timeval tv = {
		.tv_sec = timeout_ticks / 1000000,
		.tv_usec = timeout_ticks % 1000000
	};

	fd_qman = qman_thread_fd();
	nfds = fd_qman + 1;
	FD_ZERO(&readset);
	FD_SET(fd_qman, &readset);

	qman_irqsource_add(QM_PIRQ_DQRI);

	ret = select(nfds, &readset, NULL, NULL, &tv);
	if (ret < 0)
		return ret;
	/* Calling irqsource_remove() prior to thread_irq()
	 * means thread_irq() will not process whatever caused
	 * the interrupts, however it does ensure that, once
	 * thread_irq() re-enables interrupts, they won't fire
	 * again immediately.
	 */
	qman_irqsource_remove(~0);
	drain_4_bytes(fd_qman, &readset);
	qman_thread_irq();

	return ret;
}

static uint16_t
dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
			 uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i;
	uint64_t cur_ticks = 0, wait_time_ticks = 0;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		/* Affine current thread context to a qman portal */
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_EVENTDEV_ERR("Unable to initialize portal");
			return ret;
		}
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine event queue for current thread context
		 * to a qman portal.
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	if (timeout_ticks)
		wait_time_ticks = timeout_ticks;
	else
		wait_time_ticks = portal->timeout_us;

	wait_time_ticks += rte_get_timer_cycles();
	do {
		/* Let's dequeue the frames */
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		if (num_frames)
			break;
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time_ticks);

	return num_frames;
}

static uint16_t
dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);
}

/* Interrupt-mode dequeue: poll once, then wait in IRQ mode for traffic */
static uint16_t
dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
			      uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i, irq = 0;
	uint64_t cur_ticks = 0, wait_time_ticks = 0;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		/* Affine current thread context to a qman portal */
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_EVENTDEV_ERR("Unable to initialize portal");
			return ret;
		}
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine event queue for current thread context
		 * to a qman portal.
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	if (timeout_ticks)
		wait_time_ticks = timeout_ticks;
	else
		wait_time_ticks = portal->timeout_us;

	do {
		/* Let's dequeue the frames */
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		if (irq)
			irq = 0;
		if (num_frames)
			break;
		if (wait_time_ticks) { /* wait for time */
			if (dpaa_event_dequeue_wait(wait_time_ticks) > 0) {
				irq = 1;
				continue;
			}
			break; /* no event after waiting */
		}
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time_ticks);

	return num_frames;
}

static uint16_t
dpaa_event_dequeue_intr(void *port,
			struct rte_event *ev,
			uint64_t timeout_ticks)
{
	return dpaa_event_dequeue_burst_intr(port, ev, 1, timeout_ticks);
}

static void
dpaa_event_dev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	dev_info->driver_name = "event_dpaa1";
	dev_info->min_dequeue_timeout_ns =
		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	dev_info->max_event_queues =
		DPAA_EVENT_MAX_QUEUES;
	dev_info->max_event_queue_flows =
		DPAA_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports =
		DPAA_EVENT_MAX_EVENT_PORT;
	dev_info->max_event_port_dequeue_depth =
		DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	/*
	 * TODO: Need to find out how to fetch this info
	 * from the kernel or elsewhere.
	 */
	dev_info->max_num_events =
		DPAA_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap =
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE |
		RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
		RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
	dev_info->max_profiles_per_port = 1;
}

static int
dpaa_event_dev_configure(const struct rte_eventdev *dev)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	int ret, i;
	uint32_t *ch_id;

	EVENTDEV_INIT_FUNC_TRACE();
	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	priv->nb_events_limit = conf->nb_events_limit;
	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	ch_id = rte_malloc("dpaa-channels",
			   sizeof(uint32_t) * priv->nb_event_queues,
			   RTE_CACHE_LINE_SIZE);
	if (ch_id == NULL) {
		DPAA_EVENTDEV_ERR("Failed to allocate memory for dpaa channels\n");
		return -ENOMEM;
	}
	/* Create requested event queues within the given event device */
	ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
	if (ret < 0) {
		DPAA_EVENTDEV_ERR("qman_alloc_pool_range %u, err = %d\n",
				  priv->nb_event_queues, ret);
		rte_free(ch_id);
		return ret;
	}
	for (i = 0; i < priv->nb_event_queues; i++)
		priv->evq_info[i].ch_id = (u16)ch_id[i];

	/* Let's prepare the event ports */
	memset(&priv->ports[0], 0,
	       sizeof(struct dpaa_port) * priv->nb_event_ports);

	/* Check whether the dequeue timeout is per-dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * Use the timeout value given in each dequeue operation,
		 * so invalidate this global timeout value.
		 */
		priv->dequeue_timeout_ns = 0;

	} else if (conf->dequeue_timeout_ns == 0) {
		priv->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	} else {
		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	}

	for (i = 0; i < priv->nb_event_ports; i++) {
		if (priv->intr_mode) {
			priv->ports[i].timeout_us =
				priv->dequeue_timeout_ns/1000;
		} else {
			uint64_t cycles_per_second;

			cycles_per_second = rte_get_timer_hz();
			priv->ports[i].timeout_us =
				(priv->dequeue_timeout_ns * cycles_per_second)
					/ NS_PER_S;
		}
	}

	/*
	 * TODO: Currently portals are affined to threads. The maximum number
	 * of threads that can be created equals the number of lcores.
	 */
	rte_free(ch_id);
	DPAA_EVENTDEV_INFO("Configured eventdev devid=%d", dev->data->dev_id);

	return 0;
}
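
/*
 * Illustrative sketch (not part of the driver): a configuration an
 * application might pass to rte_event_dev_configure() for this PMD.
 * The counts below are arbitrary examples; the actual limits come from
 * rte_event_dev_info_get(), and dev_id is whatever the application
 * obtained for this device.  With dequeue_timeout_ns left at 0 the code
 * above falls back to DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS, while setting
 * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT in event_dev_cfg defers the
 * timeout to each dequeue call instead.
 *
 *	struct rte_event_dev_config cfg = {
 *		.dequeue_timeout_ns = 0,
 *		.nb_events_limit = 4096,
 *		.nb_event_queues = 2,
 *		.nb_event_ports = 2,
 *		.nb_event_queue_flows = 2048,
 *		.nb_event_port_dequeue_depth = 8,
 *		.nb_event_port_enqueue_depth = 8,
 *		.event_dev_cfg = 0,
 *	};
 *	int ret = rte_event_dev_configure(dev_id, &cfg);
 */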
static int
dpaa_event_dev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa_event_dev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
}

static int
dpaa_event_dev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			  struct rte_event_queue_conf *queue_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	queue_conf->nb_atomic_flows = DPAA_EVENT_QUEUE_ATOMIC_FLOWS;
	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
}

static int
dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		       const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];

	EVENTDEV_INIT_FUNC_TRACE();

	switch (queue_conf->schedule_type) {
	case RTE_SCHED_TYPE_PARALLEL:
	case RTE_SCHED_TYPE_ATOMIC:
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_EVENTDEV_ERR("Schedule type is not supported.");
		return -1;
	}
	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
	evq_info->event_queue_id = queue_id;

	return 0;
}

static void
dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static void
dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = DPAA_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth = DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth = DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
}

static int
dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
		      const struct rte_event_port_conf *port_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port_conf);
	dev->data->ports[port_id] = &eventdev->ports[port_id];

	return 0;
}

static void
dpaa_event_port_release(void *port)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port);
}

static int
dpaa_event_port_link(struct rte_eventdev *dev, void *port,
		     const uint8_t queues[], const uint8_t priorities[],
		     uint16_t nb_links)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;
	struct dpaa_eventq *event_queue;
	uint8_t eventq_id;
	int i;

	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	/* First check that the input configuration is valid */
	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		if ((event_queue->event_queue_cfg
			& RTE_EVENT_QUEUE_CFG_SINGLE_LINK)
			&& (event_queue->event_port)) {
			return -EINVAL;
		}
	}

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		event_port->evq_info[i].event_queue_id = eventq_id;
		event_port->evq_info[i].ch_id = event_queue->ch_id;
		event_queue->event_port = port;
	}

	event_port->num_linked_evq = event_port->num_linked_evq + i;

	return (int)i;
}

static int
dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
		       uint8_t queues[], uint16_t nb_links)
{
	int i;
	uint8_t eventq_id;
	struct dpaa_eventq *event_queue;
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;

	if (!event_port->num_linked_evq)
		return nb_links;

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_port->evq_info[eventq_id].event_queue_id = -1;
		event_port->evq_info[eventq_id].ch_id = 0;
		event_queue = &priv->evq_info[eventq_id];
		event_queue->event_port = NULL;
	}

	if (event_port->num_linked_evq)
		event_port->num_linked_evq = event_port->num_linked_evq - i;

	return (int)i;
}

static int
dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
				   const struct rte_eth_dev *eth_dev,
				   uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}

static int
dpaa_event_eth_rx_adapter_queue_add(
		const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	u16 ch_id = eventdev->evq_info[ev_qid].ch_id;
	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
	int ret, i;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1) {
		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
			ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
						     queue_conf);
			if (ret) {
				DPAA_EVENTDEV_ERR(
					"Event Queue attach failed:%d\n", ret);
				goto detach_configured_queues;
			}
		}
		return 0;
	}

	ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
	if (ret)
		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
	return ret;

detach_configured_queues:

	for (i = (i - 1); i >= 0; i--)
		dpaa_eth_eventq_detach(eth_dev, i);

	return ret;
}

static int
dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
				    const struct rte_eth_dev *eth_dev,
				    int32_t rx_queue_id)
{
	int ret, i;
	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	if (rx_queue_id == -1) {
		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
			ret = dpaa_eth_eventq_detach(eth_dev, i);
			if (ret)
				DPAA_EVENTDEV_ERR(
					"Event Queue detach failed:%d\n", ret);
		}

		return 0;
	}

	ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret)
		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
	return ret;
}

static int
dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
				const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
			       const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
			      const struct rte_cryptodev *cdev,
			      uint32_t *caps)
{
	const char *name = cdev->data->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strncmp(name, "dpaa_sec-", 9))
		*caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA_CAP;
	else
		return -1;

	return 0;
}

static int
dpaa_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
				   const struct rte_cryptodev *cryptodev,
				   const struct rte_event *ev)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	u16 ch_id = priv->evq_info[ev_qid].ch_id;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
		ret = dpaa_sec_eventq_attach(cryptodev, i,
					     ch_id, ev);
		if (ret) {
			DPAA_EVENTDEV_ERR("dpaa_sec_eventq_attach failed: ret %d\n",
					  ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0; i--)
		dpaa_sec_eventq_detach(cryptodev, i);

	return ret;
}

static int
dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
			       const struct rte_cryptodev *cryptodev,
			       int32_t rx_queue_id,
			       const struct rte_event_crypto_adapter_queue_conf *conf)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = conf->ev.queue_id;
	u16 ch_id = priv->evq_info[ev_qid].ch_id;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa_eventdev_crypto_queue_add_all(dev,
				cryptodev, &conf->ev);

	ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
				     ch_id, &conf->ev);
	if (ret) {
		DPAA_EVENTDEV_ERR(
			"dpaa_sec_eventq_attach failed: ret: %d\n", ret);
		return ret;
	}
	return 0;
}

static int
dpaa_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
				   const struct rte_cryptodev *cdev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
		ret = dpaa_sec_eventq_detach(cdev, i);
		if (ret) {
			DPAA_EVENTDEV_ERR(
				"dpaa_sec_eventq_detach failed:ret %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
			       const struct rte_cryptodev *cryptodev,
			       int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa_eventdev_crypto_queue_del_all(dev, cryptodev);

	ret = dpaa_sec_eventq_detach(cryptodev, rx_queue_id);
	if (ret) {
		DPAA_EVENTDEV_ERR(
			"dpaa_sec_eventq_detach failed: ret: %d\n", ret);
		return ret;
	}

	return 0;
}

static int
dpaa_eventdev_crypto_start(const struct rte_eventdev *dev,
			   const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static int
dpaa_eventdev_crypto_stop(const struct rte_eventdev *dev,
			  const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static int
dpaa_eventdev_tx_adapter_create(uint8_t id,
				const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);

	/* Nothing to do. Simply return. */
	return 0;
}

static int
dpaa_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
			      const struct rte_eth_dev *eth_dev,
			      uint32_t *caps)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
	return 0;
}

static uint16_t
dpaa_eventdev_txa_enqueue_same_dest(void *port,
				    struct rte_event ev[],
				    uint16_t nb_events)
{
	struct rte_mbuf *m[DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
	uint8_t qid, i;

	RTE_SET_USED(port);

	m0 = (struct rte_mbuf *)ev[0].mbuf;
	qid = rte_event_eth_tx_adapter_txq_get(m0);

	for (i = 0; i < nb_events; i++)
		m[i] = (struct rte_mbuf *)ev[i].mbuf;

	return rte_eth_tx_burst(m0->port, qid, m, nb_events);
}

static uint16_t
dpaa_eventdev_txa_enqueue(void *port,
			  struct rte_event ev[],
			  uint16_t nb_events)
{
	struct rte_mbuf *m;
	uint8_t qid, i;

	RTE_SET_USED(port);

	/* Transmit each event's mbuf on the Tx queue recorded in that mbuf */
	for (i = 0; i < nb_events; i++) {
		m = (struct rte_mbuf *)ev[i].mbuf;
		qid = rte_event_eth_tx_adapter_txq_get(m);
		rte_eth_tx_burst(m->port, qid, &m, 1);
	}

	return nb_events;
}

static struct eventdev_ops dpaa_eventdev_ops = {
	.dev_infos_get = dpaa_event_dev_info_get,
	.dev_configure = dpaa_event_dev_configure,
	.dev_start = dpaa_event_dev_start,
	.dev_stop = dpaa_event_dev_stop,
	.dev_close = dpaa_event_dev_close,
	.queue_def_conf = dpaa_event_queue_def_conf,
	.queue_setup = dpaa_event_queue_setup,
	.queue_release = dpaa_event_queue_release,
	.port_def_conf = dpaa_event_port_default_conf_get,
	.port_setup = dpaa_event_port_setup,
	.port_release = dpaa_event_port_release,
	.port_link = dpaa_event_port_link,
	.port_unlink = dpaa_event_port_unlink,
	.timeout_ticks = dpaa_event_dequeue_timeout_ticks,
	.eth_rx_adapter_caps_get = dpaa_event_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = dpaa_event_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = dpaa_event_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start = dpaa_event_eth_rx_adapter_start,
	.eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,
	.eth_tx_adapter_caps_get = dpaa_eventdev_tx_adapter_caps,
	.eth_tx_adapter_create = dpaa_eventdev_tx_adapter_create,
	.crypto_adapter_caps_get = dpaa_eventdev_crypto_caps_get,
	.crypto_adapter_queue_pair_add = dpaa_eventdev_crypto_queue_add,
	.crypto_adapter_queue_pair_del = dpaa_eventdev_crypto_queue_del,
	.crypto_adapter_start = dpaa_eventdev_crypto_start,
	.crypto_adapter_stop = dpaa_eventdev_crypto_stop,
};

static int flag_check_handler(__rte_unused const char *key,
		const char *value, __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
dpaa_event_check_flags(const char *params)
{
	struct rte_kvargs *kvlist;

	if (params == NULL || params[0] == '\0')
		return 0;

	kvlist = rte_kvargs_parse(params, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, DISABLE_INTR_MODE)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* Interrupt mode is disabled when the key-value pair disable_intr=1 is present */
	if (rte_kvargs_process(kvlist, DISABLE_INTR_MODE,
			       flag_check_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}

static int
dpaa_event_dev_create(const char *name, const char *params)
{
	struct rte_eventdev *eventdev;
	struct dpaa_eventdev *priv;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		DPAA_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}
	priv = eventdev->data->dev_private;

	eventdev->dev_ops = &dpaa_eventdev_ops;
	eventdev->enqueue = dpaa_event_enqueue;
	eventdev->enqueue_burst = dpaa_event_enqueue_burst;

	if (dpaa_event_check_flags(params)) {
		eventdev->dequeue = dpaa_event_dequeue;
		eventdev->dequeue_burst = dpaa_event_dequeue_burst;
	} else {
		priv->intr_mode = 1;
		eventdev->dev_ops->timeout_ticks =
				dpaa_event_dequeue_timeout_ticks_intr;
		eventdev->dequeue = dpaa_event_dequeue_intr;
		eventdev->dequeue_burst = dpaa_event_dequeue_burst_intr;
	}
	eventdev->txa_enqueue = dpaa_eventdev_txa_enqueue;
	eventdev->txa_enqueue_same_dest = dpaa_eventdev_txa_enqueue_same_dest;

	RTE_LOG(INFO, PMD, "%s eventdev added", name);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto done;

	priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;

done:
	event_dev_probing_finish(eventdev);
	return 0;
fail:
	return -EFAULT;
}

static int
dpaa_event_dev_probe(struct rte_vdev_device *vdev)
{
	const char *name;
	const char *params;

	name = rte_vdev_device_name(vdev);
	DPAA_EVENTDEV_INFO("Initializing %s", name);

	params = rte_vdev_device_args(vdev);

	return dpaa_event_dev_create(name, params);
}

static int
dpaa_event_dev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA_EVENTDEV_INFO("Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
	.probe = dpaa_event_dev_probe,
	.remove = dpaa_event_dev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);
RTE_PMD_REGISTER_PARAM_STRING(EVENTDEV_NAME_DPAA_PMD,
		DISABLE_INTR_MODE "=<int>");
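
/*
 * Usage sketch (illustrative only, not part of the driver): this PMD is a
 * vdev, so an application would typically instantiate it with an EAL
 * argument such as --vdev=event_dpaa1 (interrupt-based dequeue, the
 * default) or --vdev=event_dpaa1,disable_intr=1 (pure polling dequeue).
 * The same can be done programmatically; the calls below are standard
 * DPDK API, and the argument strings are just examples.
 *
 *	if (rte_vdev_init("event_dpaa1", "disable_intr=1") == 0) {
 *		int dev_id = rte_event_dev_get_dev_id("event_dpaa1");
 *		struct rte_event_dev_info info;
 *
 *		if (dev_id >= 0)
 *			rte_event_dev_info_get(dev_id, &info);
 *	}
 */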