1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2020 Marvell International Ltd. 3 */ 4 #include <rte_bitmap.h> 5 #include <rte_ethdev.h> 6 #include <rte_eventdev.h> 7 #include <rte_event_eth_rx_adapter.h> 8 #include <rte_event_eth_tx_adapter.h> 9 #include <rte_malloc.h> 10 #include <stdbool.h> 11 12 #include "event_helper.h" 13 14 static volatile bool eth_core_running; 15 16 static int 17 eh_get_enabled_cores(struct rte_bitmap *eth_core_mask) 18 { 19 int i, count = 0; 20 21 RTE_LCORE_FOREACH(i) { 22 /* Check if this core is enabled in core mask*/ 23 if (rte_bitmap_get(eth_core_mask, i)) { 24 /* Found enabled core */ 25 count++; 26 } 27 } 28 return count; 29 } 30 31 static inline unsigned int 32 eh_get_next_eth_core(struct eventmode_conf *em_conf) 33 { 34 static unsigned int prev_core = -1; 35 unsigned int next_core; 36 37 /* 38 * Make sure we have at least one eth core running, else the following 39 * logic would lead to an infinite loop. 40 */ 41 if (eh_get_enabled_cores(em_conf->eth_core_mask) == 0) { 42 EH_LOG_ERR("No enabled eth core found"); 43 return RTE_MAX_LCORE; 44 } 45 46 /* Only some cores are marked as eth cores, skip others */ 47 do { 48 /* Get the next core */ 49 next_core = rte_get_next_lcore(prev_core, 0, 1); 50 51 /* Check if we have reached max lcores */ 52 if (next_core == RTE_MAX_LCORE) 53 return next_core; 54 55 /* Update prev_core */ 56 prev_core = next_core; 57 } while (!(rte_bitmap_get(em_conf->eth_core_mask, next_core))); 58 59 return next_core; 60 } 61 62 static inline unsigned int 63 eh_get_next_active_core(struct eventmode_conf *em_conf, unsigned int prev_core) 64 { 65 unsigned int next_core; 66 67 /* Get next active core skipping cores reserved as eth cores */ 68 do { 69 /* Get the next core */ 70 next_core = rte_get_next_lcore(prev_core, 0, 0); 71 72 /* Check if we have reached max lcores */ 73 if (next_core == RTE_MAX_LCORE) 74 return next_core; 75 76 prev_core = next_core; 77 } while 
(rte_bitmap_get(em_conf->eth_core_mask, next_core)); 78 79 return next_core; 80 } 81 82 static struct eventdev_params * 83 eh_get_eventdev_params(struct eventmode_conf *em_conf, uint8_t eventdev_id) 84 { 85 int i; 86 87 for (i = 0; i < em_conf->nb_eventdev; i++) { 88 if (em_conf->eventdev_config[i].eventdev_id == eventdev_id) 89 break; 90 } 91 92 /* No match */ 93 if (i == em_conf->nb_eventdev) 94 return NULL; 95 96 return &(em_conf->eventdev_config[i]); 97 } 98 99 static inline bool 100 eh_dev_has_rx_internal_port(uint8_t eventdev_id) 101 { 102 bool flag = true; 103 int j; 104 105 RTE_ETH_FOREACH_DEV(j) { 106 uint32_t caps = 0; 107 108 rte_event_eth_rx_adapter_caps_get(eventdev_id, j, &caps); 109 if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) 110 flag = false; 111 } 112 return flag; 113 } 114 115 static inline bool 116 eh_dev_has_tx_internal_port(uint8_t eventdev_id) 117 { 118 bool flag = true; 119 int j; 120 121 RTE_ETH_FOREACH_DEV(j) { 122 uint32_t caps = 0; 123 124 rte_event_eth_tx_adapter_caps_get(eventdev_id, j, &caps); 125 if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) 126 flag = false; 127 } 128 return flag; 129 } 130 131 static inline bool 132 eh_dev_has_burst_mode(uint8_t dev_id) 133 { 134 struct rte_event_dev_info dev_info; 135 136 rte_event_dev_info_get(dev_id, &dev_info); 137 return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ? 138 true : false; 139 } 140 141 static int 142 eh_set_default_conf_eventdev(struct eventmode_conf *em_conf) 143 { 144 int lcore_count, nb_eventdev, nb_eth_dev, ret; 145 struct eventdev_params *eventdev_config; 146 struct rte_event_dev_info dev_info; 147 148 /* Get the number of event devices */ 149 nb_eventdev = rte_event_dev_count(); 150 if (nb_eventdev == 0) { 151 EH_LOG_ERR("No event devices detected"); 152 return -EINVAL; 153 } 154 155 if (nb_eventdev != 1) { 156 EH_LOG_ERR("Event mode does not support multiple event devices. 
" 157 "Please provide only one event device."); 158 return -EINVAL; 159 } 160 161 /* Get the number of eth devs */ 162 nb_eth_dev = rte_eth_dev_count_avail(); 163 if (nb_eth_dev == 0) { 164 EH_LOG_ERR("No eth devices detected"); 165 return -EINVAL; 166 } 167 168 /* Get the number of lcores */ 169 lcore_count = rte_lcore_count(); 170 171 /* Read event device info */ 172 ret = rte_event_dev_info_get(0, &dev_info); 173 if (ret < 0) { 174 EH_LOG_ERR("Failed to read event device info %d", ret); 175 return ret; 176 } 177 178 /* Check if enough ports are available */ 179 if (dev_info.max_event_ports < 2) { 180 EH_LOG_ERR("Not enough event ports available"); 181 return -EINVAL; 182 } 183 184 /* Get the first event dev conf */ 185 eventdev_config = &(em_conf->eventdev_config[0]); 186 187 /* Save number of queues & ports available */ 188 eventdev_config->eventdev_id = 0; 189 eventdev_config->nb_eventqueue = dev_info.max_event_queues; 190 eventdev_config->nb_eventport = dev_info.max_event_ports; 191 eventdev_config->ev_queue_mode = RTE_EVENT_QUEUE_CFG_ALL_TYPES; 192 193 /* Check if there are more queues than required */ 194 if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) { 195 /* One queue is reserved for Tx */ 196 eventdev_config->nb_eventqueue = nb_eth_dev + 1; 197 } 198 199 /* Check if there are more ports than required */ 200 if (eventdev_config->nb_eventport > lcore_count) { 201 /* One port per lcore is enough */ 202 eventdev_config->nb_eventport = lcore_count; 203 } 204 205 /* Update the number of event devices */ 206 em_conf->nb_eventdev++; 207 208 return 0; 209 } 210 211 static void 212 eh_do_capability_check(struct eventmode_conf *em_conf) 213 { 214 struct eventdev_params *eventdev_config; 215 int all_internal_ports = 1; 216 uint32_t eventdev_id; 217 int i; 218 219 for (i = 0; i < em_conf->nb_eventdev; i++) { 220 221 /* Get the event dev conf */ 222 eventdev_config = &(em_conf->eventdev_config[i]); 223 eventdev_id = eventdev_config->eventdev_id; 224 225 /* Check 
if event device has internal port for Rx & Tx */ 226 if (eh_dev_has_rx_internal_port(eventdev_id) && 227 eh_dev_has_tx_internal_port(eventdev_id)) { 228 eventdev_config->all_internal_ports = 1; 229 } else { 230 all_internal_ports = 0; 231 } 232 } 233 234 /* 235 * If Rx & Tx internal ports are supported by all event devices then 236 * eth cores won't be required. Override the eth core mask requested 237 * and decrement number of event queues by one as it won't be needed 238 * for Tx. 239 */ 240 if (all_internal_ports) { 241 rte_bitmap_reset(em_conf->eth_core_mask); 242 for (i = 0; i < em_conf->nb_eventdev; i++) 243 em_conf->eventdev_config[i].nb_eventqueue--; 244 } 245 } 246 247 static int 248 eh_set_default_conf_link(struct eventmode_conf *em_conf) 249 { 250 struct eventdev_params *eventdev_config; 251 struct eh_event_link_info *link; 252 unsigned int lcore_id = -1; 253 int i, link_index; 254 255 /* 256 * Create a 1:1 mapping from event ports to cores. If the number 257 * of event ports is lesser than the cores, some cores won't 258 * execute worker. If there are more event ports, then some ports 259 * won't be used. 260 * 261 */ 262 263 /* 264 * The event queue-port mapping is done according to the link. Since 265 * we are falling back to the default link config, enabling 266 * "all_ev_queue_to_ev_port" mode flag. This will map all queues 267 * to the port. 
268 */ 269 em_conf->ext_params.all_ev_queue_to_ev_port = 1; 270 271 /* Get first event dev conf */ 272 eventdev_config = &(em_conf->eventdev_config[0]); 273 274 /* Loop through the ports */ 275 for (i = 0; i < eventdev_config->nb_eventport; i++) { 276 277 /* Get next active core id */ 278 lcore_id = eh_get_next_active_core(em_conf, 279 lcore_id); 280 281 if (lcore_id == RTE_MAX_LCORE) { 282 /* Reached max cores */ 283 return 0; 284 } 285 286 /* Save the current combination as one link */ 287 288 /* Get the index */ 289 link_index = em_conf->nb_link; 290 291 /* Get the corresponding link */ 292 link = &(em_conf->link[link_index]); 293 294 /* Save link */ 295 link->eventdev_id = eventdev_config->eventdev_id; 296 link->event_port_id = i; 297 link->lcore_id = lcore_id; 298 299 /* 300 * Don't set eventq_id as by default all queues 301 * need to be mapped to the port, which is controlled 302 * by the operating mode. 303 */ 304 305 /* Update number of links */ 306 em_conf->nb_link++; 307 } 308 309 return 0; 310 } 311 312 static int 313 eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf) 314 { 315 struct rx_adapter_connection_info *conn; 316 struct eventdev_params *eventdev_config; 317 struct rx_adapter_conf *adapter; 318 bool rx_internal_port = true; 319 bool single_ev_queue = false; 320 int nb_eventqueue; 321 uint32_t caps = 0; 322 int eventdev_id; 323 int nb_eth_dev; 324 int adapter_id; 325 int conn_id; 326 int i; 327 328 /* Create one adapter with eth queues mapped to event queue(s) */ 329 330 if (em_conf->nb_eventdev == 0) { 331 EH_LOG_ERR("No event devs registered"); 332 return -EINVAL; 333 } 334 335 /* Get the number of eth devs */ 336 nb_eth_dev = rte_eth_dev_count_avail(); 337 338 /* Use the first event dev */ 339 eventdev_config = &(em_conf->eventdev_config[0]); 340 341 /* Get eventdev ID */ 342 eventdev_id = eventdev_config->eventdev_id; 343 adapter_id = 0; 344 345 /* Get adapter conf */ 346 adapter = &(em_conf->rx_adapter[adapter_id]); 347 348 /* Set 
adapter conf */ 349 adapter->eventdev_id = eventdev_id; 350 adapter->adapter_id = adapter_id; 351 352 /* 353 * If event device does not have internal ports for passing 354 * packets then reserved one queue for Tx path 355 */ 356 nb_eventqueue = eventdev_config->all_internal_ports ? 357 eventdev_config->nb_eventqueue : 358 eventdev_config->nb_eventqueue - 1; 359 360 /* 361 * Map all queues of eth device (port) to an event queue. If there 362 * are more event queues than eth ports then create 1:1 mapping. 363 * Otherwise map all eth ports to a single event queue. 364 */ 365 if (nb_eth_dev > nb_eventqueue) 366 single_ev_queue = true; 367 368 for (i = 0; i < nb_eth_dev; i++) { 369 370 /* Use only the ports enabled */ 371 if ((em_conf->eth_portmask & (1 << i)) == 0) 372 continue; 373 374 /* Get the connection id */ 375 conn_id = adapter->nb_connections; 376 377 /* Get the connection */ 378 conn = &(adapter->conn[conn_id]); 379 380 /* Set mapping between eth ports & event queues*/ 381 conn->ethdev_id = i; 382 conn->eventq_id = single_ev_queue ? 
0 : i; 383 384 /* Add all eth queues eth port to event queue */ 385 conn->ethdev_rx_qid = -1; 386 387 /* Get Rx adapter capabilities */ 388 rte_event_eth_rx_adapter_caps_get(eventdev_id, i, &caps); 389 if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) 390 rx_internal_port = false; 391 392 /* Update no of connections */ 393 adapter->nb_connections++; 394 395 } 396 397 if (rx_internal_port) { 398 /* Rx core is not required */ 399 adapter->rx_core_id = -1; 400 } else { 401 /* Rx core is required */ 402 adapter->rx_core_id = eh_get_next_eth_core(em_conf); 403 } 404 405 /* We have setup one adapter */ 406 em_conf->nb_rx_adapter = 1; 407 408 return 0; 409 } 410 411 static int 412 eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf) 413 { 414 struct tx_adapter_connection_info *conn; 415 struct eventdev_params *eventdev_config; 416 struct tx_adapter_conf *tx_adapter; 417 bool tx_internal_port = true; 418 uint32_t caps = 0; 419 int eventdev_id; 420 int adapter_id; 421 int nb_eth_dev; 422 int conn_id; 423 int i; 424 425 /* 426 * Create one Tx adapter with all eth queues mapped to event queues 427 * 1:1. 428 */ 429 430 if (em_conf->nb_eventdev == 0) { 431 EH_LOG_ERR("No event devs registered"); 432 return -EINVAL; 433 } 434 435 /* Get the number of eth devs */ 436 nb_eth_dev = rte_eth_dev_count_avail(); 437 438 /* Use the first event dev */ 439 eventdev_config = &(em_conf->eventdev_config[0]); 440 441 /* Get eventdev ID */ 442 eventdev_id = eventdev_config->eventdev_id; 443 adapter_id = 0; 444 445 /* Get adapter conf */ 446 tx_adapter = &(em_conf->tx_adapter[adapter_id]); 447 448 /* Set adapter conf */ 449 tx_adapter->eventdev_id = eventdev_id; 450 tx_adapter->adapter_id = adapter_id; 451 452 /* 453 * Map all Tx queues of the eth device (port) to the event device. 454 */ 455 456 /* Set defaults for connections */ 457 458 /* 459 * One eth device (port) is one connection. Map all Tx queues 460 * of the device to the Tx adapter. 
461 */ 462 463 for (i = 0; i < nb_eth_dev; i++) { 464 465 /* Use only the ports enabled */ 466 if ((em_conf->eth_portmask & (1 << i)) == 0) 467 continue; 468 469 /* Get the connection id */ 470 conn_id = tx_adapter->nb_connections; 471 472 /* Get the connection */ 473 conn = &(tx_adapter->conn[conn_id]); 474 475 /* Add ethdev to connections */ 476 conn->ethdev_id = i; 477 478 /* Add all eth tx queues to adapter */ 479 conn->ethdev_tx_qid = -1; 480 481 /* Get Tx adapter capabilities */ 482 rte_event_eth_tx_adapter_caps_get(eventdev_id, i, &caps); 483 if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) 484 tx_internal_port = false; 485 486 /* Update no of connections */ 487 tx_adapter->nb_connections++; 488 } 489 490 if (tx_internal_port) { 491 /* Tx core is not required */ 492 tx_adapter->tx_core_id = -1; 493 } else { 494 /* Tx core is required */ 495 tx_adapter->tx_core_id = eh_get_next_eth_core(em_conf); 496 497 /* 498 * Use one event queue per adapter for submitting packets 499 * for Tx. Reserving the last queue available 500 */ 501 /* Queue numbers start at 0 */ 502 tx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1; 503 } 504 505 /* We have setup one adapter */ 506 em_conf->nb_tx_adapter = 1; 507 return 0; 508 } 509 510 static int 511 eh_validate_conf(struct eventmode_conf *em_conf) 512 { 513 int ret; 514 515 /* 516 * Check if event devs are specified. Else probe the event devices 517 * and initialize the config with all ports & queues available 518 */ 519 if (em_conf->nb_eventdev == 0) { 520 ret = eh_set_default_conf_eventdev(em_conf); 521 if (ret != 0) 522 return ret; 523 } 524 525 /* Perform capability check for the selected event devices */ 526 eh_do_capability_check(em_conf); 527 528 /* 529 * Check if links are specified. Else generate a default config for 530 * the event ports used. 
531 */ 532 if (em_conf->nb_link == 0) { 533 ret = eh_set_default_conf_link(em_conf); 534 if (ret != 0) 535 return ret; 536 } 537 538 /* 539 * Check if rx adapters are specified. Else generate a default config 540 * with one rx adapter and all eth queues - event queue mapped. 541 */ 542 if (em_conf->nb_rx_adapter == 0) { 543 ret = eh_set_default_conf_rx_adapter(em_conf); 544 if (ret != 0) 545 return ret; 546 } 547 548 /* 549 * Check if tx adapters are specified. Else generate a default config 550 * with one tx adapter. 551 */ 552 if (em_conf->nb_tx_adapter == 0) { 553 ret = eh_set_default_conf_tx_adapter(em_conf); 554 if (ret != 0) 555 return ret; 556 } 557 558 return 0; 559 } 560 561 static int 562 eh_initialize_eventdev(struct eventmode_conf *em_conf) 563 { 564 struct rte_event_queue_conf eventq_conf = {0}; 565 struct rte_event_dev_info evdev_default_conf; 566 struct rte_event_dev_config eventdev_conf; 567 struct eventdev_params *eventdev_config; 568 int nb_eventdev = em_conf->nb_eventdev; 569 struct eh_event_link_info *link; 570 uint8_t *queue = NULL; 571 uint8_t eventdev_id; 572 int nb_eventqueue; 573 uint8_t i, j; 574 int ret; 575 576 for (i = 0; i < nb_eventdev; i++) { 577 578 /* Get eventdev config */ 579 eventdev_config = &(em_conf->eventdev_config[i]); 580 581 /* Get event dev ID */ 582 eventdev_id = eventdev_config->eventdev_id; 583 584 /* Get the number of queues */ 585 nb_eventqueue = eventdev_config->nb_eventqueue; 586 587 /* Reset the default conf */ 588 memset(&evdev_default_conf, 0, 589 sizeof(struct rte_event_dev_info)); 590 591 /* Get default conf of eventdev */ 592 ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf); 593 if (ret < 0) { 594 EH_LOG_ERR( 595 "Error in getting event device info[devID:%d]", 596 eventdev_id); 597 return ret; 598 } 599 600 memset(&eventdev_conf, 0, sizeof(struct rte_event_dev_config)); 601 eventdev_conf.nb_events_limit = 602 evdev_default_conf.max_num_events; 603 eventdev_conf.nb_event_queues = nb_eventqueue; 
604 eventdev_conf.nb_event_ports = 605 eventdev_config->nb_eventport; 606 eventdev_conf.nb_event_queue_flows = 607 evdev_default_conf.max_event_queue_flows; 608 eventdev_conf.nb_event_port_dequeue_depth = 609 evdev_default_conf.max_event_port_dequeue_depth; 610 eventdev_conf.nb_event_port_enqueue_depth = 611 evdev_default_conf.max_event_port_enqueue_depth; 612 613 /* Configure event device */ 614 ret = rte_event_dev_configure(eventdev_id, &eventdev_conf); 615 if (ret < 0) { 616 EH_LOG_ERR("Error in configuring event device"); 617 return ret; 618 } 619 620 /* Configure event queues */ 621 for (j = 0; j < nb_eventqueue; j++) { 622 623 memset(&eventq_conf, 0, 624 sizeof(struct rte_event_queue_conf)); 625 626 /* Per event dev queues can be ATQ or SINGLE LINK */ 627 eventq_conf.event_queue_cfg = 628 eventdev_config->ev_queue_mode; 629 /* 630 * All queues need to be set with sched_type as 631 * schedule type for the application stage. One 632 * queue would be reserved for the final eth tx 633 * stage if event device does not have internal 634 * ports. This will be an atomic queue. 
635 */ 636 if (!eventdev_config->all_internal_ports && 637 j == nb_eventqueue-1) { 638 eventq_conf.schedule_type = 639 RTE_SCHED_TYPE_ATOMIC; 640 } else { 641 eventq_conf.schedule_type = 642 em_conf->ext_params.sched_type; 643 } 644 645 /* Set max atomic flows to 1024 */ 646 eventq_conf.nb_atomic_flows = 1024; 647 eventq_conf.nb_atomic_order_sequences = 1024; 648 649 /* Setup the queue */ 650 ret = rte_event_queue_setup(eventdev_id, j, 651 &eventq_conf); 652 if (ret < 0) { 653 EH_LOG_ERR("Failed to setup event queue %d", 654 ret); 655 return ret; 656 } 657 } 658 659 /* Configure event ports */ 660 for (j = 0; j < eventdev_config->nb_eventport; j++) { 661 ret = rte_event_port_setup(eventdev_id, j, NULL); 662 if (ret < 0) { 663 EH_LOG_ERR("Failed to setup event port %d", 664 ret); 665 return ret; 666 } 667 } 668 } 669 670 /* Make event queue - event port link */ 671 for (j = 0; j < em_conf->nb_link; j++) { 672 673 /* Get link info */ 674 link = &(em_conf->link[j]); 675 676 /* Get event dev ID */ 677 eventdev_id = link->eventdev_id; 678 679 /* 680 * If "all_ev_queue_to_ev_port" params flag is selected, all 681 * queues need to be mapped to the port. 
682 */ 683 if (em_conf->ext_params.all_ev_queue_to_ev_port) 684 queue = NULL; 685 else 686 queue = &(link->eventq_id); 687 688 /* Link queue to port */ 689 ret = rte_event_port_link(eventdev_id, link->event_port_id, 690 queue, NULL, 1); 691 if (ret < 0) { 692 EH_LOG_ERR("Failed to link event port %d", ret); 693 return ret; 694 } 695 } 696 697 /* Start event devices */ 698 for (i = 0; i < nb_eventdev; i++) { 699 700 /* Get eventdev config */ 701 eventdev_config = &(em_conf->eventdev_config[i]); 702 703 ret = rte_event_dev_start(eventdev_config->eventdev_id); 704 if (ret < 0) { 705 EH_LOG_ERR("Failed to start event device %d, %d", 706 i, ret); 707 return ret; 708 } 709 } 710 return 0; 711 } 712 713 static int 714 eh_rx_adapter_configure(struct eventmode_conf *em_conf, 715 struct rx_adapter_conf *adapter) 716 { 717 struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0}; 718 struct rte_event_dev_info evdev_default_conf = {0}; 719 struct rte_event_port_conf port_conf = {0}; 720 struct rx_adapter_connection_info *conn; 721 uint8_t eventdev_id; 722 uint32_t service_id; 723 int ret; 724 int j; 725 726 /* Get event dev ID */ 727 eventdev_id = adapter->eventdev_id; 728 729 /* Get default configuration of event dev */ 730 ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf); 731 if (ret < 0) { 732 EH_LOG_ERR("Failed to get event dev info %d", ret); 733 return ret; 734 } 735 736 /* Setup port conf */ 737 port_conf.new_event_threshold = 1200; 738 port_conf.dequeue_depth = 739 evdev_default_conf.max_event_port_dequeue_depth; 740 port_conf.enqueue_depth = 741 evdev_default_conf.max_event_port_enqueue_depth; 742 743 /* Create Rx adapter */ 744 ret = rte_event_eth_rx_adapter_create(adapter->adapter_id, 745 adapter->eventdev_id, &port_conf); 746 if (ret < 0) { 747 EH_LOG_ERR("Failed to create rx adapter %d", ret); 748 return ret; 749 } 750 751 /* Setup various connections in the adapter */ 752 for (j = 0; j < adapter->nb_connections; j++) { 753 /* Get connection */ 
754 conn = &(adapter->conn[j]); 755 756 /* Setup queue conf */ 757 queue_conf.ev.queue_id = conn->eventq_id; 758 queue_conf.ev.sched_type = em_conf->ext_params.sched_type; 759 queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV; 760 761 /* Add queue to the adapter */ 762 ret = rte_event_eth_rx_adapter_queue_add(adapter->adapter_id, 763 conn->ethdev_id, conn->ethdev_rx_qid, 764 &queue_conf); 765 if (ret < 0) { 766 EH_LOG_ERR("Failed to add eth queue to rx adapter %d", 767 ret); 768 return ret; 769 } 770 } 771 772 /* Get the service ID used by rx adapter */ 773 ret = rte_event_eth_rx_adapter_service_id_get(adapter->adapter_id, 774 &service_id); 775 if (ret != -ESRCH && ret < 0) { 776 EH_LOG_ERR("Failed to get service id used by rx adapter %d", 777 ret); 778 return ret; 779 } 780 781 rte_service_set_runstate_mapped_check(service_id, 0); 782 783 /* Start adapter */ 784 ret = rte_event_eth_rx_adapter_start(adapter->adapter_id); 785 if (ret < 0) { 786 EH_LOG_ERR("Failed to start rx adapter %d", ret); 787 return ret; 788 } 789 790 return 0; 791 } 792 793 static int 794 eh_initialize_rx_adapter(struct eventmode_conf *em_conf) 795 { 796 struct rx_adapter_conf *adapter; 797 int i, ret; 798 799 /* Configure rx adapters */ 800 for (i = 0; i < em_conf->nb_rx_adapter; i++) { 801 adapter = &(em_conf->rx_adapter[i]); 802 ret = eh_rx_adapter_configure(em_conf, adapter); 803 if (ret < 0) { 804 EH_LOG_ERR("Failed to configure rx adapter %d", ret); 805 return ret; 806 } 807 } 808 return 0; 809 } 810 811 static int32_t 812 eh_start_worker_eth_core(struct eventmode_conf *conf, uint32_t lcore_id) 813 { 814 uint32_t service_id[EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE]; 815 struct rx_adapter_conf *rx_adapter; 816 struct tx_adapter_conf *tx_adapter; 817 int service_count = 0; 818 int adapter_id; 819 int32_t ret; 820 int i; 821 822 EH_LOG_INFO("Entering eth_core processing on lcore %u", lcore_id); 823 824 /* 825 * Parse adapter config to check which of all Rx adapters need 826 * to be handled by 
this core. 827 */ 828 for (i = 0; i < conf->nb_rx_adapter; i++) { 829 /* Check if we have exceeded the max allowed */ 830 if (service_count > EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE) { 831 EH_LOG_ERR( 832 "Exceeded the max allowed adapters per rx core"); 833 break; 834 } 835 836 rx_adapter = &(conf->rx_adapter[i]); 837 if (rx_adapter->rx_core_id != lcore_id) 838 continue; 839 840 /* Adapter is handled by this core */ 841 adapter_id = rx_adapter->adapter_id; 842 843 /* Get the service ID for the adapters */ 844 ret = rte_event_eth_rx_adapter_service_id_get(adapter_id, 845 &(service_id[service_count])); 846 847 if (ret != -ESRCH && ret < 0) { 848 EH_LOG_ERR( 849 "Failed to get service id used by rx adapter"); 850 return ret; 851 } 852 853 /* Update service count */ 854 service_count++; 855 } 856 857 /* 858 * Parse adapter config to see which of all Tx adapters need 859 * to be handled by this core. 860 */ 861 for (i = 0; i < conf->nb_tx_adapter; i++) { 862 /* Check if we have exceeded the max allowed */ 863 if (service_count > EVENT_MODE_MAX_ADAPTERS_PER_TX_CORE) { 864 EH_LOG_ERR( 865 "Exceeded the max allowed adapters per tx core"); 866 break; 867 } 868 869 tx_adapter = &conf->tx_adapter[i]; 870 if (tx_adapter->tx_core_id != lcore_id) 871 continue; 872 873 /* Adapter is handled by this core */ 874 adapter_id = tx_adapter->adapter_id; 875 876 /* Get the service ID for the adapters */ 877 ret = rte_event_eth_tx_adapter_service_id_get(adapter_id, 878 &(service_id[service_count])); 879 880 if (ret != -ESRCH && ret < 0) { 881 EH_LOG_ERR( 882 "Failed to get service id used by tx adapter"); 883 return ret; 884 } 885 886 /* Update service count */ 887 service_count++; 888 } 889 890 eth_core_running = true; 891 892 while (eth_core_running) { 893 for (i = 0; i < service_count; i++) { 894 /* Initiate adapter service */ 895 rte_service_run_iter_on_app_lcore(service_id[i], 0); 896 } 897 } 898 899 return 0; 900 } 901 902 static int32_t 903 eh_stop_worker_eth_core(void) 904 { 905 if 
(eth_core_running) { 906 EH_LOG_INFO("Stopping eth cores"); 907 eth_core_running = false; 908 } 909 return 0; 910 } 911 912 static struct eh_app_worker_params * 913 eh_find_worker(uint32_t lcore_id, struct eh_conf *conf, 914 struct eh_app_worker_params *app_wrkrs, uint8_t nb_wrkr_param) 915 { 916 struct eh_app_worker_params curr_conf = { {{0} }, NULL}; 917 struct eh_event_link_info *link = NULL; 918 struct eh_app_worker_params *tmp_wrkr; 919 struct eventmode_conf *em_conf; 920 uint8_t eventdev_id; 921 int i; 922 923 /* Get eventmode config */ 924 em_conf = conf->mode_params; 925 926 /* 927 * Use event device from the first lcore-event link. 928 * 929 * Assumption: All lcore-event links tied to a core are using the 930 * same event device. In other words, one core would be polling on 931 * queues of a single event device only. 932 */ 933 934 /* Get a link for this lcore */ 935 for (i = 0; i < em_conf->nb_link; i++) { 936 link = &(em_conf->link[i]); 937 if (link->lcore_id == lcore_id) 938 break; 939 } 940 941 if (link == NULL) { 942 EH_LOG_ERR("No valid link found for lcore %d", lcore_id); 943 return NULL; 944 } 945 946 /* Get event dev ID */ 947 eventdev_id = link->eventdev_id; 948 949 /* Populate the curr_conf with the capabilities */ 950 951 /* Check for Tx internal port */ 952 if (eh_dev_has_tx_internal_port(eventdev_id)) 953 curr_conf.cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT; 954 else 955 curr_conf.cap.tx_internal_port = EH_TX_TYPE_NO_INTERNAL_PORT; 956 957 /* Check for burst mode */ 958 if (eh_dev_has_burst_mode(eventdev_id)) 959 curr_conf.cap.burst = EH_RX_TYPE_BURST; 960 else 961 curr_conf.cap.burst = EH_RX_TYPE_NON_BURST; 962 963 /* Parse the passed list and see if we have matching capabilities */ 964 965 /* Initialize the pointer used to traverse the list */ 966 tmp_wrkr = app_wrkrs; 967 968 for (i = 0; i < nb_wrkr_param; i++, tmp_wrkr++) { 969 970 /* Skip this if capabilities are not matching */ 971 if (tmp_wrkr->cap.u64 != curr_conf.cap.u64) 972 
continue; 973 974 /* If the checks pass, we have a match */ 975 return tmp_wrkr; 976 } 977 978 return NULL; 979 } 980 981 static int 982 eh_verify_match_worker(struct eh_app_worker_params *match_wrkr) 983 { 984 /* Verify registered worker */ 985 if (match_wrkr->worker_thread == NULL) { 986 EH_LOG_ERR("No worker registered"); 987 return 0; 988 } 989 990 /* Success */ 991 return 1; 992 } 993 994 static uint8_t 995 eh_get_event_lcore_links(uint32_t lcore_id, struct eh_conf *conf, 996 struct eh_event_link_info **links) 997 { 998 struct eh_event_link_info *link_cache; 999 struct eventmode_conf *em_conf = NULL; 1000 struct eh_event_link_info *link; 1001 uint8_t lcore_nb_link = 0; 1002 size_t single_link_size; 1003 size_t cache_size; 1004 int index = 0; 1005 int i; 1006 1007 if (conf == NULL || links == NULL) { 1008 EH_LOG_ERR("Invalid args"); 1009 return -EINVAL; 1010 } 1011 1012 /* Get eventmode conf */ 1013 em_conf = conf->mode_params; 1014 1015 if (em_conf == NULL) { 1016 EH_LOG_ERR("Invalid event mode parameters"); 1017 return -EINVAL; 1018 } 1019 1020 /* Get the number of links registered */ 1021 for (i = 0; i < em_conf->nb_link; i++) { 1022 1023 /* Get link */ 1024 link = &(em_conf->link[i]); 1025 1026 /* Check if we have link intended for this lcore */ 1027 if (link->lcore_id == lcore_id) { 1028 1029 /* Update the number of links for this core */ 1030 lcore_nb_link++; 1031 1032 } 1033 } 1034 1035 /* Compute size of one entry to be copied */ 1036 single_link_size = sizeof(struct eh_event_link_info); 1037 1038 /* Compute size of the buffer required */ 1039 cache_size = lcore_nb_link * sizeof(struct eh_event_link_info); 1040 1041 /* Compute size of the buffer required */ 1042 link_cache = calloc(1, cache_size); 1043 1044 /* Get the number of links registered */ 1045 for (i = 0; i < em_conf->nb_link; i++) { 1046 1047 /* Get link */ 1048 link = &(em_conf->link[i]); 1049 1050 /* Check if we have link intended for this lcore */ 1051 if (link->lcore_id == lcore_id) { 1052 
1053 /* Cache the link */ 1054 memcpy(&link_cache[index], link, single_link_size); 1055 1056 /* Update index */ 1057 index++; 1058 } 1059 } 1060 1061 /* Update the links for application to use the cached links */ 1062 *links = link_cache; 1063 1064 /* Return the number of cached links */ 1065 return lcore_nb_link; 1066 } 1067 1068 static int 1069 eh_tx_adapter_configure(struct eventmode_conf *em_conf, 1070 struct tx_adapter_conf *adapter) 1071 { 1072 struct rte_event_dev_info evdev_default_conf = {0}; 1073 struct rte_event_port_conf port_conf = {0}; 1074 struct tx_adapter_connection_info *conn; 1075 struct eventdev_params *eventdev_config; 1076 uint8_t tx_port_id = 0; 1077 uint8_t eventdev_id; 1078 uint32_t service_id; 1079 int ret, j; 1080 1081 /* Get event dev ID */ 1082 eventdev_id = adapter->eventdev_id; 1083 1084 /* Get event device conf */ 1085 eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id); 1086 1087 /* Create Tx adapter */ 1088 1089 /* Get default configuration of event dev */ 1090 ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf); 1091 if (ret < 0) { 1092 EH_LOG_ERR("Failed to get event dev info %d", ret); 1093 return ret; 1094 } 1095 1096 /* Setup port conf */ 1097 port_conf.new_event_threshold = 1098 evdev_default_conf.max_num_events; 1099 port_conf.dequeue_depth = 1100 evdev_default_conf.max_event_port_dequeue_depth; 1101 port_conf.enqueue_depth = 1102 evdev_default_conf.max_event_port_enqueue_depth; 1103 1104 /* Create adapter */ 1105 ret = rte_event_eth_tx_adapter_create(adapter->adapter_id, 1106 adapter->eventdev_id, &port_conf); 1107 if (ret < 0) { 1108 EH_LOG_ERR("Failed to create tx adapter %d", ret); 1109 return ret; 1110 } 1111 1112 /* Setup various connections in the adapter */ 1113 for (j = 0; j < adapter->nb_connections; j++) { 1114 1115 /* Get connection */ 1116 conn = &(adapter->conn[j]); 1117 1118 /* Add queue to the adapter */ 1119 ret = rte_event_eth_tx_adapter_queue_add(adapter->adapter_id, 1120 
conn->ethdev_id, conn->ethdev_tx_qid); 1121 if (ret < 0) { 1122 EH_LOG_ERR("Failed to add eth queue to tx adapter %d", 1123 ret); 1124 return ret; 1125 } 1126 } 1127 1128 /* 1129 * Check if Tx core is assigned. If Tx core is not assigned then 1130 * the adapter has internal port for submitting Tx packets and 1131 * Tx event queue & port setup is not required 1132 */ 1133 if (adapter->tx_core_id == (uint32_t) (-1)) { 1134 /* Internal port is present */ 1135 goto skip_tx_queue_port_setup; 1136 } 1137 1138 /* Setup Tx queue & port */ 1139 1140 /* Get event port used by the adapter */ 1141 ret = rte_event_eth_tx_adapter_event_port_get( 1142 adapter->adapter_id, &tx_port_id); 1143 if (ret) { 1144 EH_LOG_ERR("Failed to get tx adapter port id %d", ret); 1145 return ret; 1146 } 1147 1148 /* 1149 * Tx event queue is reserved for Tx adapter. Unlink this queue 1150 * from all other ports 1151 * 1152 */ 1153 for (j = 0; j < eventdev_config->nb_eventport; j++) { 1154 rte_event_port_unlink(eventdev_id, j, 1155 &(adapter->tx_ev_queue), 1); 1156 } 1157 1158 /* Link Tx event queue to Tx port */ 1159 ret = rte_event_port_link(eventdev_id, tx_port_id, 1160 &(adapter->tx_ev_queue), NULL, 1); 1161 if (ret != 1) { 1162 EH_LOG_ERR("Failed to link event queue to port"); 1163 return ret; 1164 } 1165 1166 /* Get the service ID used by Tx adapter */ 1167 ret = rte_event_eth_tx_adapter_service_id_get(adapter->adapter_id, 1168 &service_id); 1169 if (ret != -ESRCH && ret < 0) { 1170 EH_LOG_ERR("Failed to get service id used by tx adapter %d", 1171 ret); 1172 return ret; 1173 } 1174 1175 rte_service_set_runstate_mapped_check(service_id, 0); 1176 1177 skip_tx_queue_port_setup: 1178 /* Start adapter */ 1179 ret = rte_event_eth_tx_adapter_start(adapter->adapter_id); 1180 if (ret < 0) { 1181 EH_LOG_ERR("Failed to start tx adapter %d", ret); 1182 return ret; 1183 } 1184 1185 return 0; 1186 } 1187 1188 static int 1189 eh_initialize_tx_adapter(struct eventmode_conf *em_conf) 1190 { 1191 struct 
tx_adapter_conf *adapter;
	int i, ret;

	/* Configure Tx adapters one by one; abort on first failure */
	for (i = 0; i < em_conf->nb_tx_adapter; i++) {
		adapter = &(em_conf->tx_adapter[i]);
		ret = eh_tx_adapter_configure(em_conf, adapter);
		if (ret < 0) {
			EH_LOG_ERR("Failed to configure tx adapter %d", ret);
			return ret;
		}
	}
	return 0;
}

/*
 * Log the operating mode (scheduling type) chosen in the event mode conf.
 * The table is indexed by the RTE_SCHED_TYPE_* value stored in ext_params.
 */
static void
eh_display_operating_mode(struct eventmode_conf *em_conf)
{
	char sched_types[][32] = {
		"RTE_SCHED_TYPE_ORDERED",
		"RTE_SCHED_TYPE_ATOMIC",
		"RTE_SCHED_TYPE_PARALLEL",
	};
	EH_LOG_INFO("Operating mode:");

	EH_LOG_INFO("\tScheduling type: \t%s",
		sched_types[em_conf->ext_params.sched_type]);

	EH_LOG_INFO("");
}

/*
 * Log one summary line per configured event device: id, queue/port counts
 * and the queue mode (indexed by ev_queue_mode).
 */
static void
eh_display_event_dev_conf(struct eventmode_conf *em_conf)
{
	char queue_mode[][32] = {
		"",
		"ATQ (ALL TYPE QUEUE)",
		"SINGLE LINK",
	};
	char print_buf[256] = { 0 };
	int i;

	EH_LOG_INFO("Event Device Configuration:");

	for (i = 0; i < em_conf->nb_eventdev; i++) {
		sprintf(print_buf,
			"\tDev ID: %-2d \tQueues: %-2d \tPorts: %-2d",
			em_conf->eventdev_config[i].eventdev_id,
			em_conf->eventdev_config[i].nb_eventqueue,
			em_conf->eventdev_config[i].nb_eventport);
		sprintf(print_buf + strlen(print_buf),
			"\tQueue mode: %s",
			queue_mode[em_conf->eventdev_config[i].ev_queue_mode]);
		EH_LOG_INFO("%s", print_buf);
	}
	EH_LOG_INFO("");
}

/*
 * Log every Rx adapter with its connections. rx_core_id of (uint32_t)-1
 * means the device has an internal port; RTE_MAX_LCORE means no core.
 * An ethdev_rx_qid of -1 means all Rx queues of the port are connected.
 * NOTE(review): "Rx adaper" below is a typo in the runtime log string;
 * left untouched here since this edit changes comments only.
 */
static void
eh_display_rx_adapter_conf(struct eventmode_conf *em_conf)
{
	int nb_rx_adapter = em_conf->nb_rx_adapter;
	struct rx_adapter_connection_info *conn;
	struct rx_adapter_conf *adapter;
	char print_buf[256] = { 0 };
	int i, j;

	EH_LOG_INFO("Rx adapters configured: %d", nb_rx_adapter);

	for (i = 0; i < nb_rx_adapter; i++) {
		adapter = &(em_conf->rx_adapter[i]);
		sprintf(print_buf,
			"\tRx adaper ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
			adapter->adapter_id,
			adapter->nb_connections,
			adapter->eventdev_id);
		if (adapter->rx_core_id == (uint32_t)-1)
			sprintf(print_buf + strlen(print_buf),
				"\tRx core: %-2s", "[INTERNAL PORT]");
		else if (adapter->rx_core_id == RTE_MAX_LCORE)
			sprintf(print_buf + strlen(print_buf),
				"\tRx core: %-2s", "[NONE]");
		else
			sprintf(print_buf + strlen(print_buf),
				"\tRx core: %-2d", adapter->rx_core_id);

		EH_LOG_INFO("%s", print_buf);

		/* One line per ethdev queue -> event queue connection */
		for (j = 0; j < adapter->nb_connections; j++) {
			conn = &(adapter->conn[j]);

			sprintf(print_buf,
				"\t\tEthdev ID: %-2d", conn->ethdev_id);

			if (conn->ethdev_rx_qid == -1)
				sprintf(print_buf + strlen(print_buf),
					"\tEth rx queue: %-2s", "ALL");
			else
				sprintf(print_buf + strlen(print_buf),
					"\tEth rx queue: %-2d",
					conn->ethdev_rx_qid);

			sprintf(print_buf + strlen(print_buf),
				"\tEvent queue: %-2d", conn->eventq_id);
			EH_LOG_INFO("%s", print_buf);
		}
	}
	EH_LOG_INFO("");
}

/*
 * Log every Tx adapter with its connections, mirroring the Rx variant:
 * tx_core_id of (uint32_t)-1 means internal port, RTE_MAX_LCORE means
 * no core; ethdev_tx_qid of -1 means all Tx queues are connected.
 */
static void
eh_display_tx_adapter_conf(struct eventmode_conf *em_conf)
{
	int nb_tx_adapter = em_conf->nb_tx_adapter;
	struct tx_adapter_connection_info *conn;
	struct tx_adapter_conf *adapter;
	char print_buf[256] = { 0 };
	int i, j;

	EH_LOG_INFO("Tx adapters configured: %d", nb_tx_adapter);

	for (i = 0; i < nb_tx_adapter; i++) {
		adapter = &(em_conf->tx_adapter[i]);
		sprintf(print_buf,
			"\tTx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
			adapter->adapter_id,
			adapter->nb_connections,
			adapter->eventdev_id);
		if (adapter->tx_core_id == (uint32_t)-1)
			sprintf(print_buf + strlen(print_buf),
				"\tTx core: %-2s", "[INTERNAL PORT]");
		else if (adapter->tx_core_id == RTE_MAX_LCORE)
			sprintf(print_buf + strlen(print_buf),
				"\tTx core: %-2s", "[NONE]");
		else
			sprintf(print_buf + strlen(print_buf),
				"\tTx core: %-2d,\tInput event queue: %-2d",
				adapter->tx_core_id, adapter->tx_ev_queue);

		EH_LOG_INFO("%s", print_buf);

		/* One line per ethdev queue connected to this adapter */
		for (j = 0; j < adapter->nb_connections; j++) {
			conn = &(adapter->conn[j]);

			sprintf(print_buf,
				"\t\tEthdev ID: %-2d", conn->ethdev_id);

			if (conn->ethdev_tx_qid == -1)
				sprintf(print_buf + strlen(print_buf),
					"\tEth tx queue: %-2s", "ALL");
			else
				sprintf(print_buf + strlen(print_buf),
					"\tEth tx queue: %-2d",
					conn->ethdev_tx_qid);
			EH_LOG_INFO("%s", print_buf);
		}
	}
	EH_LOG_INFO("");
}

/*
 * Log every event queue -> event port -> lcore link. When the
 * all_ev_queue_to_ev_port flag is set, the queue column shows "ALL".
 */
static void
eh_display_link_conf(struct eventmode_conf *em_conf)
{
	struct eh_event_link_info *link;
	char print_buf[256] = { 0 };
	int i;

	EH_LOG_INFO("Links configured: %d", em_conf->nb_link);

	for (i = 0; i < em_conf->nb_link; i++) {
		link = &(em_conf->link[i]);

		sprintf(print_buf,
			"\tEvent dev ID: %-2d\tEvent port: %-2d",
			link->eventdev_id,
			link->event_port_id);

		if (em_conf->ext_params.all_ev_queue_to_ev_port)
			sprintf(print_buf + strlen(print_buf),
				"Event queue: %-2s\t", "ALL");
		else
			sprintf(print_buf + strlen(print_buf),
				"Event queue: %-2d\t", link->eventq_id);

		sprintf(print_buf + strlen(print_buf),
			"Lcore: %-2d", link->lcore_id);
		EH_LOG_INFO("%s", print_buf);
	}
	EH_LOG_INFO("");
}

/*
 * Allocate and default-initialize the event helper configuration:
 * poll mode, all eth ports enabled, scheduling type unset, and two
 * lcores (the first two non-master cores) reserved as eth Rx/Tx cores
 * in the eth core bitmap. Returns NULL on any allocation/init failure,
 * releasing everything acquired so far. Caller frees via eh_conf_uninit().
 */
struct eh_conf *
eh_conf_init(void)
{
	struct eventmode_conf *em_conf = NULL;
	struct eh_conf *conf = NULL;
	unsigned int eth_core_id;
	void *bitmap = NULL;
	uint32_t nb_bytes;

	/* Allocate memory for config */
	conf = calloc(1, sizeof(struct eh_conf));
	if (conf == NULL) {
		EH_LOG_ERR("Failed to allocate memory for eventmode helper "
			"config");
		return NULL;
	}

	/* Set default conf */

	/* Packet transfer
mode: poll */
	conf->mode = EH_PKT_TRANSFER_MODE_POLL;

	/* Keep all ethernet ports enabled by default */
	conf->eth_portmask = -1;

	/* Allocate memory for event mode params */
	conf->mode_params = calloc(1, sizeof(struct eventmode_conf));
	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Failed to allocate memory for event mode params");
		goto free_conf;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Allocate and initialize bitmap for eth cores */
	nb_bytes = rte_bitmap_get_memory_footprint(RTE_MAX_LCORE);
	if (!nb_bytes) {
		EH_LOG_ERR("Failed to get bitmap footprint");
		goto free_em_conf;
	}

	bitmap = rte_zmalloc("event-helper-ethcore-bitmap", nb_bytes,
			RTE_CACHE_LINE_SIZE);
	if (!bitmap) {
		EH_LOG_ERR("Failed to allocate memory for eth cores bitmap\n");
		goto free_em_conf;
	}

	em_conf->eth_core_mask = rte_bitmap_init(RTE_MAX_LCORE, bitmap,
			nb_bytes);
	if (!em_conf->eth_core_mask) {
		EH_LOG_ERR("Failed to initialize bitmap");
		goto free_bitmap;
	}

	/* Set schedule type as not set */
	em_conf->ext_params.sched_type = SCHED_TYPE_NOT_SET;

	/* Set two cores as eth cores for Rx & Tx */

	/* Use first core other than master core as Rx core */
	eth_core_id = rte_get_next_lcore(0,	/* curr core */
			1, /* skip master core */
			0 /* wrap */);

	rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);

	/* Use next core as Tx core */
	eth_core_id = rte_get_next_lcore(eth_core_id, /* curr core */
			1, /* skip master core */
			0 /* wrap */);

	rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);

	return conf;

	/* Unwind in reverse order of acquisition */
free_bitmap:
	rte_free(bitmap);
free_em_conf:
	free(em_conf);
free_conf:
	free(conf);
	return NULL;
}

/*
 * Release the configuration created by eh_conf_init(): the eth core
 * bitmap, the event mode params and the top-level conf. Safe to call
 * with NULL or a conf lacking mode_params (no-op).
 */
void
eh_conf_uninit(struct eh_conf *conf)
{
	struct eventmode_conf
*em_conf = NULL;

	if (!conf || !conf->mode_params)
		return;

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Free eventmode configuration memory */
	rte_free(em_conf->eth_core_mask);
	free(em_conf);
	free(conf);
}

/*
 * Display the full helper configuration (operating mode, event devs,
 * Rx/Tx adapters, queue-port-lcore links). No-op unless the transfer
 * mode is EH_PKT_TRANSFER_MODE_EVENT.
 */
void
eh_display_conf(struct eh_conf *conf)
{
	struct eventmode_conf *em_conf;

	if (conf == NULL) {
		EH_LOG_ERR("Invalid event helper configuration");
		return;
	}

	if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
		return;

	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return;
	}

	/* Get eventmode conf */
	em_conf = (struct eventmode_conf *)(conf->mode_params);

	/* Display user exposed operating modes */
	eh_display_operating_mode(em_conf);

	/* Display event device conf */
	eh_display_event_dev_conf(em_conf);

	/* Display Rx adapter conf */
	eh_display_rx_adapter_conf(em_conf);

	/* Display Tx adapter conf */
	eh_display_tx_adapter_conf(em_conf);

	/* Display event-lcore link */
	eh_display_link_conf(em_conf);
}

/*
 * Validate the requested event-mode config and bring up all devices:
 * stop the enabled eth ports, initialize the event dev and the Rx/Tx
 * adapters, then restart the eth ports. Returns 0 when mode is not
 * EVENT; otherwise 0 on success or a negative error code.
 */
int32_t
eh_devs_init(struct eh_conf *conf)
{
	struct eventmode_conf *em_conf;
	uint16_t port_id;
	int ret;

	if (conf == NULL) {
		EH_LOG_ERR("Invalid event helper configuration");
		return -EINVAL;
	}

	if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
		return 0;

	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return -EINVAL;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Eventmode conf would need eth portmask */
	em_conf->eth_portmask = conf->eth_portmask;

	/* Validate the requested config */
	ret = eh_validate_conf(em_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to validate the requested config %d", ret);
		return ret;
	}

	/* Display the current configuration */
	eh_display_conf(conf);

	/* Stop eth devices before setting up adapter */
	RTE_ETH_FOREACH_DEV(port_id) {

		/* Use only the ports enabled */
		if ((conf->eth_portmask & (1 << port_id)) == 0)
			continue;

		rte_eth_dev_stop(port_id);
	}

	/* Setup eventdev */
	ret = eh_initialize_eventdev(em_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to initialize event dev %d", ret);
		return ret;
	}

	/* Setup Rx adapter */
	ret = eh_initialize_rx_adapter(em_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to initialize rx adapter %d", ret);
		return ret;
	}

	/* Setup Tx adapter */
	ret = eh_initialize_tx_adapter(em_conf);
	if (ret < 0) {
		EH_LOG_ERR("Failed to initialize tx adapter %d", ret);
		return ret;
	}

	/* Start eth devices after setting up adapter */
	RTE_ETH_FOREACH_DEV(port_id) {

		/* Use only the ports enabled */
		if ((conf->eth_portmask & (1 << port_id)) == 0)
			continue;

		ret = rte_eth_dev_start(port_id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to start eth dev %d, %d",
				   port_id, ret);
			return ret;
		}
	}

	return 0;
}

/*
 * Tear down everything set up by eh_devs_init(): stop and free the Rx
 * adapters, stop and close the event devices, then stop and free the Tx
 * adapters. Returns 0 when mode is not EVENT; otherwise 0 on success or
 * the first negative error encountered (teardown stops at that point).
 */
int32_t
eh_devs_uninit(struct eh_conf *conf)
{
	struct eventmode_conf *em_conf;
	int ret, i, j;
	uint16_t id;

	if (conf == NULL) {
		EH_LOG_ERR("Invalid event helper configuration");
		return -EINVAL;
	}

	if (conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
		return 0;

	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return -EINVAL;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Stop and release rx adapters */
	for (i = 0; i < em_conf->nb_rx_adapter; i++) {

		id = em_conf->rx_adapter[i].adapter_id;
		ret = rte_event_eth_rx_adapter_stop(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to stop rx adapter %d", ret);
			return ret;
		}

		/* Delete all queues (-1) of every connected ethdev */
		for (j = 0; j < em_conf->rx_adapter[i].nb_connections; j++) {

			ret = rte_event_eth_rx_adapter_queue_del(id,
				em_conf->rx_adapter[i].conn[j].ethdev_id, -1);
			if (ret < 0) {
				EH_LOG_ERR(
					"Failed to remove rx adapter queues %d",
					ret);
				return ret;
			}
		}

		ret = rte_event_eth_rx_adapter_free(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to free rx adapter %d", ret);
			return ret;
		}
	}

	/*
	 * Stop and release event devices.
	 * NOTE(review): the event devices are closed here while the Tx
	 * adapters below are still attached to them — verify the adapters
	 * should not be stopped/freed before rte_event_dev_close().
	 */
	for (i = 0; i < em_conf->nb_eventdev; i++) {

		id = em_conf->eventdev_config[i].eventdev_id;
		rte_event_dev_stop(id);

		ret = rte_event_dev_close(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to close event dev %d, %d", id, ret);
			return ret;
		}
	}

	/* Stop and release tx adapters */
	for (i = 0; i < em_conf->nb_tx_adapter; i++) {

		id = em_conf->tx_adapter[i].adapter_id;
		ret = rte_event_eth_tx_adapter_stop(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to stop tx adapter %d", ret);
			return ret;
		}

		/* Delete all queues (-1) of every connected ethdev */
		for (j = 0; j < em_conf->tx_adapter[i].nb_connections; j++) {

			ret = rte_event_eth_tx_adapter_queue_del(id,
				em_conf->tx_adapter[i].conn[j].ethdev_id, -1);
			if (ret < 0) {
				EH_LOG_ERR(
					"Failed to remove tx adapter queues %d",
					ret);
				return ret;
			}
		}

		ret = rte_event_eth_tx_adapter_free(id);
		if (ret < 0) {
			EH_LOG_ERR("Failed to free tx adapter %d", ret);
			return ret;
		}
	}

	return 0;
}

/*
 * Entry point executed on every lcore. Eth cores (per the eth core
 * bitmap) run the Rx/Tx packet-transfer loop; other cores pick the first
 * registered worker matching the event device capabilities and run it.
 * On exit (or on failure to match/validate a worker) the eth cores are
 * flagged to stop.
 */
void
eh_launch_worker(struct eh_conf *conf, struct eh_app_worker_params *app_wrkr,
		uint8_t nb_wrkr_param)
{
	struct eh_app_worker_params *match_wrkr;
	struct eh_event_link_info *links = NULL;
	struct eventmode_conf *em_conf;
	uint32_t lcore_id;
	uint8_t nb_links;

	if (conf == NULL) {
		EH_LOG_ERR("Invalid event helper configuration");
		return;
	}

	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Eth cores run the dedicated Rx/Tx loop and never return here */
	if (rte_bitmap_get(em_conf->eth_core_mask, lcore_id)) {
		eh_start_worker_eth_core(em_conf, lcore_id);
		return;
	}

	if (app_wrkr == NULL || nb_wrkr_param == 0) {
		EH_LOG_ERR("Invalid args");
		return;
	}

	/*
	 * This is a regular worker thread. The application registers
	 * multiple workers with various capabilities. Run worker
	 * based on the selected capabilities of the event
	 * device configured.
	 */

	/* Get the first matching worker for the event device */
	match_wrkr = eh_find_worker(lcore_id, conf, app_wrkr, nb_wrkr_param);
	if (match_wrkr == NULL) {
		EH_LOG_ERR("Failed to match worker registered for lcore %d",
			   lcore_id);
		goto clean_and_exit;
	}

	/* Verify sanity of the matched worker */
	if (eh_verify_match_worker(match_wrkr) != 1) {
		EH_LOG_ERR("Failed to validate the matched worker");
		goto clean_and_exit;
	}

	/* Get worker links (heap-allocated; owned by this function) */
	nb_links = eh_get_event_lcore_links(lcore_id, conf, &links);

	/* Launch the worker thread; blocks until the worker returns */
	match_wrkr->worker_thread(links, nb_links);

	/* Free links info memory */
	free(links);

clean_and_exit:

	/* Flag eth_cores to stop, if started */
	eh_stop_worker_eth_core();
}

/*
 * Return the event queue reserved for the Tx stage of the given event
 * device (the last queue).
 * NOTE(review): the return type is uint8_t, so the -EINVAL error returns
 * below are truncated to a large positive value — callers cannot
 * distinguish them from a valid queue id; confirm intended contract.
 */
uint8_t
eh_get_tx_queue(struct eh_conf *conf, uint8_t eventdev_id)
{
	struct eventdev_params *eventdev_config;
	struct eventmode_conf *em_conf;

	if (conf == NULL) {
		EH_LOG_ERR("Invalid event helper configuration");
		return -EINVAL;
	}

	if (conf->mode_params == NULL) {
		EH_LOG_ERR("Invalid event mode parameters");
		return -EINVAL;
	}

	/* Get eventmode conf */
	em_conf = conf->mode_params;

	/* Get event device conf */
	eventdev_config = eh_get_eventdev_params(em_conf, eventdev_id);

	if (eventdev_config == NULL) {
		EH_LOG_ERR("Failed to read eventdev config");
		return -EINVAL;
	}

	/*
	 * The last queue is reserved to be used as atomic queue for the
	 * last stage (eth packet tx stage)
	 */
	return eventdev_config->nb_eventqueue - 1;
}