/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 */
#include <rte_spinlock.h>
#include <rte_service_component.h>
#include <ethdev_driver.h>

#include "eventdev_pmd.h"
#include "eventdev_trace.h"
#include "rte_event_eth_tx_adapter.h"

#define TXA_BATCH_SIZE		32
#define TXA_SERVICE_NAME_LEN	32
#define TXA_MEM_NAME_LEN	32
#define TXA_FLUSH_THRESHOLD	1024
#define TXA_RETRY_CNT		100
#define TXA_MAX_NB_TX		128
#define TXA_INVALID_DEV_ID	INT32_C(-1)
#define TXA_INVALID_SERVICE_ID	INT64_C(-1)

#define TXA_ADAPTER_ARRAY "txa_adapter_array"
#define TXA_SERVICE_DATA_ARRAY "txa_service_data_array"

#define txa_evdev(id) (&rte_eventdevs[txa_dev_id_array[(id)]])

#define txa_dev_caps_get(id) txa_evdev((id))->dev_ops->eth_tx_adapter_caps_get

#define txa_dev_adapter_create(t) txa_evdev(t)->dev_ops->eth_tx_adapter_create

#define txa_dev_adapter_create_ext(t) \
				txa_evdev(t)->dev_ops->eth_tx_adapter_create

#define txa_dev_adapter_free(t) txa_evdev(t)->dev_ops->eth_tx_adapter_free

#define txa_dev_queue_add(id) txa_evdev(id)->dev_ops->eth_tx_adapter_queue_add

#define txa_dev_queue_del(t) txa_evdev(t)->dev_ops->eth_tx_adapter_queue_del

#define txa_dev_start(t) txa_evdev(t)->dev_ops->eth_tx_adapter_start

#define txa_dev_stop(t) txa_evdev(t)->dev_ops->eth_tx_adapter_stop

#define txa_dev_stats_reset(t) txa_evdev(t)->dev_ops->eth_tx_adapter_stats_reset

#define txa_dev_stats_get(t) txa_evdev(t)->dev_ops->eth_tx_adapter_stats_get

#define txa_dev_instance_get(id) \
			txa_evdev(id)->dev_ops->eth_tx_adapter_instance_get

#define txa_dev_queue_start(id) \
			txa_evdev(id)->dev_ops->eth_tx_adapter_queue_start

#define txa_dev_queue_stop(id) \
			txa_evdev(id)->dev_ops->eth_tx_adapter_queue_stop

#define RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) \
do { \
	if (!txa_valid_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid eth Tx adapter id = %d", id); \
		return retval; \
	} \
} while (0)

#define TXA_CHECK_OR_ERR_RET(id) \
do {\
	int ret; \
	RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET((id), -EINVAL); \
	ret = txa_init(); \
	if (ret != 0) \
		return ret; \
	if (!txa_adapter_exist((id))) \
		return -EINVAL; \
} while (0)

#define TXA_CHECK_TXQ(dev, queue) \
do {\
	if ((dev)->data->nb_tx_queues == 0) { \
		RTE_EDEV_LOG_ERR("No tx queues configured"); \
		return -EINVAL; \
	} \
	if ((queue) != -1 && \
	    (uint16_t)(queue) >= (dev)->data->nb_tx_queues) { \
		RTE_EDEV_LOG_ERR("Invalid tx queue_id %" PRIu16, \
				 (uint16_t)(queue)); \
		return -EINVAL; \
	} \
} while (0)

/* Tx retry callback structure */
struct txa_retry {
	/* Ethernet port id */
	uint16_t port_id;
	/* Tx queue */
	uint16_t tx_queue;
	/* Adapter ID */
	uint8_t id;
};

/* Per queue structure */
struct txa_service_queue_info {
	/* Queue has been added */
	uint8_t added;
	/* Queue is stopped */
	bool stopped;
	/* Retry callback argument */
	struct txa_retry txa_retry;
	/* Tx buffer */
	struct rte_eth_dev_tx_buffer *tx_buf;
};

/* PMD private structure */
struct txa_service_data {
	/* Max mbufs processed in any service function invocation */
	uint32_t max_nb_tx;
	/* Number of Tx queues in adapter */
	uint32_t nb_queues;
	/* Synchronization with data path */
	rte_spinlock_t tx_lock;
	/* Event port ID */
	uint8_t port_id;
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Highest port id supported + 1 */
	uint16_t dev_count;
	/* Loop count to flush Tx buffers */
	int loop_cnt;
	/* Loop count threshold to flush Tx buffers */
	uint16_t flush_threshold;
	/* Per ethernet device structure */
	struct txa_service_ethdev *txa_ethdev;
	/* Statistics */
	struct rte_event_eth_tx_adapter_stats stats;
	/* Adapter Identifier */
	uint8_t id;
	/* Conf arg must be freed */
	uint8_t conf_free;
	/* Configuration callback */
	rte_event_eth_tx_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* socket id */
	int socket_id;
	/* Per adapter EAL service */
	int64_t service_id;
	/* Memory allocation name */
	char mem_name[TXA_MEM_NAME_LEN];
} __rte_cache_aligned;

/* Per eth device structure */
struct txa_service_ethdev {
	/* Pointer to ethernet device */
	struct rte_eth_dev *dev;
	/* Number of queues added */
	uint16_t nb_queues;
	/* PMD specific queue data */
	void *queues;
};

/* Array of adapter instances, initialized with event device id
 * when adapter is created
 */
static int *txa_dev_id_array;

/* Array of pointers to service implementation data */
static struct txa_service_data **txa_service_data_array;

static int32_t txa_service_func(void *args);
static int txa_service_adapter_create_ext(uint8_t id,
			struct rte_eventdev *dev,
			rte_event_eth_tx_adapter_conf_cb conf_cb,
			void *conf_arg);
static int txa_service_queue_del(uint8_t id,
				 const struct rte_eth_dev *dev,
				 int32_t tx_queue_id);

static int
txa_adapter_exist(uint8_t id)
{
	return txa_dev_id_array[id] != TXA_INVALID_DEV_ID;
}

static inline int
txa_valid_id(uint8_t id)
{
	return id < RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE;
}

static void *
txa_memzone_array_get(const char *name, unsigned int elt_size, int nb_elems)
{
	const struct rte_memzone *mz;
	unsigned int sz;

	sz = elt_size * nb_elems;
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
						 RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			RTE_EDEV_LOG_ERR("failed to reserve memzone"
					 " name = %s err = %" PRId32,
					 name, rte_errno);
			return NULL;
		}
	}

	return mz->addr;
}

static int
txa_lookup(void)
{
	const struct rte_memzone *mz;

	if (txa_dev_id_array == NULL) {
		mz = rte_memzone_lookup(TXA_ADAPTER_ARRAY);
		if (mz == NULL)
			return -ENOMEM;
		txa_dev_id_array = mz->addr;
	}

	if (txa_service_data_array == NULL) {
		mz = rte_memzone_lookup(TXA_SERVICE_DATA_ARRAY);
		if (mz == NULL)
			return -ENOMEM;
		txa_service_data_array = mz->addr;
	}

	return 0;
}

static int
txa_dev_id_array_init(void)
{
	if (txa_dev_id_array == NULL) {
		int i;

		txa_dev_id_array = txa_memzone_array_get(TXA_ADAPTER_ARRAY,
					sizeof(int),
					RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE);
		if (txa_dev_id_array == NULL)
			return -ENOMEM;

		for (i = 0; i < RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE; i++)
			txa_dev_id_array[i] = TXA_INVALID_DEV_ID;
	}

	return 0;
}

static int
txa_init(void)
{
	return txa_dev_id_array_init();
}

static int
txa_service_data_init(void)
{
	if (txa_service_data_array == NULL) {
		int i;

		txa_service_data_array =
				txa_memzone_array_get(TXA_SERVICE_DATA_ARRAY,
					sizeof(*txa_service_data_array),
					RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE);
		if (txa_service_data_array == NULL)
			return -ENOMEM;

		/* Reset the txa service pointers */
		for (i = 0; i < RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE; i++)
			txa_service_data_array[i] = NULL;
	}

	return 0;
}

static inline struct txa_service_data *
txa_service_id_to_data(uint8_t id)
{
	return txa_service_data_array[id];
}

static inline struct txa_service_queue_info *
txa_service_queue(struct txa_service_data *txa, uint16_t port_id,
		  uint16_t tx_queue_id)
{
	struct txa_service_queue_info *tqi;

	if (unlikely(txa->txa_ethdev == NULL || txa->dev_count < port_id + 1))
		return NULL;

	tqi = txa->txa_ethdev[port_id].queues;

	return likely(tqi != NULL) ? tqi + tx_queue_id : NULL;
}

static int
txa_service_conf_cb(uint8_t __rte_unused id, uint8_t dev_id,
		    struct rte_event_eth_tx_adapter_conf *conf, void *arg)
{
	int ret;
	struct rte_eventdev *dev;
	struct rte_event_port_conf *pc;
	struct rte_event_dev_config dev_conf;
	int started;
	uint8_t port_id;

	pc = arg;
	dev = &rte_eventdevs[dev_id];
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);

	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	if (pc->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK)
		dev_conf.nb_single_link_event_port_queues += 1;

	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to configure event dev %u", dev_id);
		if (started) {
			if (rte_event_dev_start(dev_id))
				return -EIO;
		}
		return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, pc);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id);
		if (started) {
			if (rte_event_dev_start(dev_id))
				return -EIO;
		}
		return ret;
	}

	conf->event_port_id = port_id;
	conf->max_nb_tx = TXA_MAX_NB_TX;
	if (started)
		ret = rte_event_dev_start(dev_id);
	return ret;
}

static int
txa_service_ethdev_alloc(struct txa_service_data *txa)
{
	struct txa_service_ethdev *txa_ethdev;
	uint16_t i, dev_count;

	dev_count = rte_eth_dev_count_avail();
	if (txa->txa_ethdev && dev_count == txa->dev_count)
		return 0;

	txa_ethdev = rte_zmalloc_socket(txa->mem_name,
					dev_count * sizeof(*txa_ethdev),
					0,
					txa->socket_id);
	if (txa_ethdev == NULL) {
		RTE_EDEV_LOG_ERR("Failed to alloc txa::txa_ethdev");
		return -ENOMEM;
	}

	if (txa->dev_count)
		memcpy(txa_ethdev, txa->txa_ethdev,
		       txa->dev_count * sizeof(*txa_ethdev));

	RTE_ETH_FOREACH_DEV(i) {
		if (i == dev_count)
			break;
		txa_ethdev[i].dev = &rte_eth_devices[i];
	}

	txa->txa_ethdev = txa_ethdev;
	txa->dev_count = dev_count;
	return 0;
}

static int
txa_service_queue_array_alloc(struct txa_service_data *txa,
			      uint16_t port_id)
{
	struct txa_service_queue_info *tqi;
	uint16_t nb_queue;
	int ret;

	ret = txa_service_ethdev_alloc(txa);
	if (ret != 0)
		return ret;

	if (txa->txa_ethdev[port_id].queues)
		return 0;

	nb_queue = txa->txa_ethdev[port_id].dev->data->nb_tx_queues;
	tqi = rte_zmalloc_socket(txa->mem_name,
				 nb_queue *
				 sizeof(struct txa_service_queue_info), 0,
				 txa->socket_id);
	if (tqi == NULL)
		return -ENOMEM;
	txa->txa_ethdev[port_id].queues = tqi;
	return 0;
}

static void
txa_service_queue_array_free(struct txa_service_data *txa,
			     uint16_t port_id)
{
	struct txa_service_ethdev *txa_ethdev;
	struct txa_service_queue_info *tqi;

	txa_ethdev = &txa->txa_ethdev[port_id];
	if (txa->txa_ethdev == NULL || txa_ethdev->nb_queues != 0)
		return;

	tqi = txa_ethdev->queues;
	txa_ethdev->queues = NULL;
	rte_free(tqi);

	if (txa->nb_queues == 0) {
		rte_free(txa->txa_ethdev);
		txa->txa_ethdev = NULL;
	}
}

static void
txa_service_unregister(struct txa_service_data *txa)
{
	if (txa->service_id != TXA_INVALID_SERVICE_ID) {
		rte_service_component_runstate_set(txa->service_id, 0);
		while (rte_service_may_be_active(txa->service_id))
			rte_pause();
		rte_service_component_unregister(txa->service_id);
	}
	txa->service_id = TXA_INVALID_SERVICE_ID;
}

static int
txa_service_register(struct txa_service_data *txa)
{
	int ret;
	struct rte_service_spec service;
	struct rte_event_eth_tx_adapter_conf conf;

	if (txa->service_id != TXA_INVALID_SERVICE_ID)
		return 0;

	memset(&service, 0, sizeof(service));
	snprintf(service.name, TXA_SERVICE_NAME_LEN, "txa_%d", txa->id);
	service.socket_id = txa->socket_id;
	service.callback = txa_service_func;
	service.callback_userdata = txa;
	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
	ret = rte_service_component_register(&service,
					     (uint32_t *)&txa->service_id);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to register service %s err = %"
				 PRId32, service.name, ret);
		return ret;
	}

	ret = txa->conf_cb(txa->id, txa->eventdev_id, &conf, txa->conf_arg);
	if (ret) {
		txa_service_unregister(txa);
		return ret;
	}

	rte_service_component_runstate_set(txa->service_id, 1);
	txa->port_id = conf.event_port_id;
	txa->max_nb_tx = conf.max_nb_tx;
	return 0;
}

static struct rte_eth_dev_tx_buffer *
txa_service_tx_buf_alloc(struct txa_service_data *txa,
			 const struct rte_eth_dev *dev)
{
	struct rte_eth_dev_tx_buffer *tb;
	uint16_t port_id;

	port_id = dev->data->port_id;
	tb = rte_zmalloc_socket(txa->mem_name,
				RTE_ETH_TX_BUFFER_SIZE(TXA_BATCH_SIZE),
				0,
				rte_eth_dev_socket_id(port_id));
	if (tb == NULL)
		RTE_EDEV_LOG_ERR("Failed to allocate memory for tx buffer");
	return tb;
}

static int
txa_service_is_queue_added(struct txa_service_data *txa,
			   const struct rte_eth_dev *dev,
			   uint16_t tx_queue_id)
{
	struct txa_service_queue_info *tqi;

	tqi = txa_service_queue(txa, dev->data->port_id, tx_queue_id);
	return tqi && tqi->added;
}

static int
txa_service_ctrl(uint8_t id, int start)
{
	int ret;
	struct txa_service_data *txa;

	txa = txa_service_id_to_data(id);
	if (txa == NULL || txa->service_id == TXA_INVALID_SERVICE_ID)
		return 0;

	rte_spinlock_lock(&txa->tx_lock);
	ret = rte_service_runstate_set(txa->service_id, start);
	rte_spinlock_unlock(&txa->tx_lock);

	return ret;
}

static void
txa_service_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
			 void *userdata)
{
	struct txa_retry *tr;
	struct txa_service_data *data;
	struct rte_event_eth_tx_adapter_stats *stats;
	uint16_t sent = 0;
	unsigned int retry = 0;
	uint16_t i, n;

	tr = (struct txa_retry *)(uintptr_t)userdata;
	data = txa_service_id_to_data(tr->id);
	stats = &data->stats;

	do {
		n = rte_eth_tx_burst(tr->port_id, tr->tx_queue,
				     &pkts[sent], unsent - sent);

		sent += n;
	} while (sent != unsent && retry++ < TXA_RETRY_CNT);

	for (i = sent; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);

	stats->tx_retry += retry;
	stats->tx_packets += sent;
	stats->tx_dropped += unsent - sent;
}

static uint16_t
txa_process_event_vector(struct txa_service_data *txa,
			 struct rte_event_vector *vec)
{
	struct txa_service_queue_info *tqi;
	uint16_t port, queue, nb_tx = 0;
	struct rte_mbuf **mbufs;
	int i;

	mbufs = (struct rte_mbuf **)vec->mbufs;
	if (vec->attr_valid) {
		port = vec->port;
		queue = vec->queue;
		tqi = txa_service_queue(txa, port, queue);
		if (unlikely(tqi == NULL || !tqi->added || tqi->stopped)) {
			rte_pktmbuf_free_bulk(&mbufs[vec->elem_offset],
					      vec->nb_elem);
			rte_mempool_put(rte_mempool_from_obj(vec), vec);
			return 0;
		}
		for (i = 0; i < vec->nb_elem; i++) {
			nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf,
						   mbufs[i + vec->elem_offset]);
		}
	} else {
		for (i = vec->elem_offset; i < vec->elem_offset + vec->nb_elem;
		     i++) {
			port = mbufs[i]->port;
			queue = rte_event_eth_tx_adapter_txq_get(mbufs[i]);
			tqi = txa_service_queue(txa, port, queue);
			if (unlikely(tqi == NULL || !tqi->added ||
				     tqi->stopped)) {
				rte_pktmbuf_free(mbufs[i]);
				continue;
			}
			nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf,
						   mbufs[i]);
		}
	}
	rte_mempool_put(rte_mempool_from_obj(vec), vec);

	return nb_tx;
}

static void
txa_service_tx(struct txa_service_data *txa, struct rte_event *ev,
	       uint32_t n)
{
	uint32_t i;
	uint16_t nb_tx;
	struct rte_event_eth_tx_adapter_stats *stats;

	stats = &txa->stats;

	nb_tx = 0;
	for (i = 0; i < n; i++) {
		uint16_t port;
		uint16_t queue;
		struct txa_service_queue_info *tqi;

		if (!(ev[i].event_type & RTE_EVENT_TYPE_VECTOR)) {
			struct rte_mbuf *m;

			m = ev[i].mbuf;
			port = m->port;
			queue = rte_event_eth_tx_adapter_txq_get(m);

			tqi = txa_service_queue(txa, port, queue);
			if (unlikely(tqi == NULL || !tqi->added ||
				     tqi->stopped)) {
				rte_pktmbuf_free(m);
				continue;
			}

			nb_tx += rte_eth_tx_buffer(port, queue,
						   tqi->tx_buf, m);
		} else {
			nb_tx += txa_process_event_vector(txa, ev[i].vec);
		}
	}

	stats->tx_packets += nb_tx;
}

static int32_t
txa_service_func(void *args)
{
	struct txa_service_data *txa = args;
	uint8_t dev_id;
	uint8_t port;
	int ret = -EAGAIN;
	uint16_t n;
	uint32_t nb_tx, max_nb_tx;
	struct rte_event ev[TXA_BATCH_SIZE];

	dev_id = txa->eventdev_id;
	max_nb_tx = txa->max_nb_tx;
	port = txa->port_id;

	if (txa->nb_queues == 0)
		return ret;

	if (!rte_spinlock_trylock(&txa->tx_lock))
		return ret;

	for (nb_tx = 0; nb_tx < max_nb_tx; nb_tx += n) {

		n = rte_event_dequeue_burst(dev_id, port, ev, RTE_DIM(ev), 0);
		if (!n)
			break;
		txa_service_tx(txa, ev, n);
		ret = 0;
	}

	if (txa->loop_cnt++ == txa->flush_threshold) {

		struct txa_service_ethdev *tdi;
		struct txa_service_queue_info *tqi;
		struct rte_eth_dev *dev;
		uint16_t i;

		txa->loop_cnt = 0;
		tdi = txa->txa_ethdev;
		nb_tx = 0;

		RTE_ETH_FOREACH_DEV(i) {
			uint16_t q;

			if (i >= txa->dev_count)
				break;

			dev = tdi[i].dev;
			if (tdi[i].nb_queues == 0)
				continue;
			for (q = 0; q < dev->data->nb_tx_queues; q++) {

				tqi = txa_service_queue(txa, i, q);
				if (unlikely(tqi == NULL || !tqi->added ||
					     tqi->stopped))
					continue;

				nb_tx += rte_eth_tx_buffer_flush(i, q,
								 tqi->tx_buf);
			}
		}

		if (likely(nb_tx > 0)) {
			txa->stats.tx_packets += nb_tx;
			ret = 0;
		}
	}
	rte_spinlock_unlock(&txa->tx_lock);
	return ret;
}

static int
txa_service_adapter_create(uint8_t id, struct rte_eventdev *dev,
			   struct rte_event_port_conf *port_conf)
{
	struct txa_service_data *txa;
	struct rte_event_port_conf *cb_conf;
	int ret;

	cb_conf = rte_malloc(NULL, sizeof(*cb_conf), 0);
	if (cb_conf == NULL)
		return -ENOMEM;

	*cb_conf = *port_conf;
	ret = txa_service_adapter_create_ext(id, dev, txa_service_conf_cb,
					     cb_conf);
	if (ret) {
		rte_free(cb_conf);
		return ret;
	}

	txa = txa_service_id_to_data(id);
	txa->conf_free = 1;
	return ret;
}

static int
txa_service_adapter_create_ext(uint8_t id, struct rte_eventdev *dev,
			       rte_event_eth_tx_adapter_conf_cb conf_cb,
			       void *conf_arg)
{
	struct txa_service_data *txa;
	int socket_id;
	char mem_name[TXA_SERVICE_NAME_LEN];
	int ret;

	if (conf_cb == NULL)
		return -EINVAL;

	socket_id = dev->data->socket_id;
	snprintf(mem_name, TXA_MEM_NAME_LEN,
		 "rte_event_eth_txa_%d",
		 id);

	ret = txa_service_data_init();
	if (ret != 0)
		return ret;

	txa = rte_zmalloc_socket(mem_name,
				 sizeof(*txa),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txa == NULL) {
		RTE_EDEV_LOG_ERR("failed to get mem for tx adapter");
		return -ENOMEM;
	}

	txa->id = id;
	txa->eventdev_id = dev->data->dev_id;
	txa->socket_id = socket_id;
	strncpy(txa->mem_name, mem_name, TXA_SERVICE_NAME_LEN);
	txa->conf_cb = conf_cb;
	txa->conf_arg = conf_arg;
	txa->service_id = TXA_INVALID_SERVICE_ID;
	rte_spinlock_init(&txa->tx_lock);
	txa_service_data_array[id] = txa;
	txa->flush_threshold = TXA_FLUSH_THRESHOLD;

	return 0;
}

static int
txa_service_event_port_get(uint8_t id, uint8_t *port)
{
	struct txa_service_data *txa;

	txa = txa_service_id_to_data(id);
	if (txa->service_id == TXA_INVALID_SERVICE_ID)
		return -ENODEV;

	*port = txa->port_id;
	return 0;
}

static int
txa_service_adapter_free(uint8_t id)
{
	struct txa_service_data *txa;

	txa = txa_service_id_to_data(id);
	if (txa->nb_queues) {
		RTE_EDEV_LOG_ERR("%" PRIu16 " Tx queues not deleted",
				 txa->nb_queues);
		return -EBUSY;
	}

	if (txa->conf_free)
		rte_free(txa->conf_arg);
	rte_free(txa);
	return 0;
}

static int
txa_service_queue_add(uint8_t id,
		      __rte_unused struct rte_eventdev *dev,
		      const struct rte_eth_dev *eth_dev,
		      int32_t tx_queue_id)
{
	struct txa_service_data *txa;
	struct txa_service_ethdev *tdi;
	struct txa_service_queue_info *tqi;
	struct rte_eth_dev_tx_buffer *tb;
	struct txa_retry *txa_retry;
	int ret = 0;

	txa = txa_service_id_to_data(id);

	if (tx_queue_id == -1) {
		int nb_queues;
		uint16_t i, j;
		uint16_t *qdone;

		nb_queues = eth_dev->data->nb_tx_queues;
		if (txa->dev_count > eth_dev->data->port_id) {
			tdi = &txa->txa_ethdev[eth_dev->data->port_id];
			nb_queues -= tdi->nb_queues;
		}

		qdone = rte_zmalloc(txa->mem_name,
				    nb_queues * sizeof(*qdone), 0);
		if (qdone == NULL)
			return -ENOMEM;
		j = 0;
		for (i = 0; i < nb_queues; i++) {
			if (txa_service_is_queue_added(txa, eth_dev, i))
				continue;
			ret = txa_service_queue_add(id, dev, eth_dev, i);
			if (ret == 0)
				qdone[j++] = i;
			else
				break;
		}

		if (i != nb_queues) {
			for (i = 0; i < j; i++)
				txa_service_queue_del(id, eth_dev, qdone[i]);
		}
		rte_free(qdone);
		return ret;
	}

	ret = txa_service_register(txa);
	if (ret)
		return ret;

	rte_spinlock_lock(&txa->tx_lock);

	if (txa_service_is_queue_added(txa, eth_dev, tx_queue_id))
		goto ret_unlock;

	ret = txa_service_queue_array_alloc(txa, eth_dev->data->port_id);
	if (ret)
		goto err_unlock;

	tb = txa_service_tx_buf_alloc(txa, eth_dev);
	if (tb == NULL)
		goto err_unlock;

	tdi = &txa->txa_ethdev[eth_dev->data->port_id];
	tqi = txa_service_queue(txa, eth_dev->data->port_id, tx_queue_id);
	if (tqi == NULL)
		goto err_unlock;

	txa_retry = &tqi->txa_retry;
	txa_retry->id = txa->id;
	txa_retry->port_id = eth_dev->data->port_id;
	txa_retry->tx_queue = tx_queue_id;

	rte_eth_tx_buffer_init(tb, TXA_BATCH_SIZE);
	rte_eth_tx_buffer_set_err_callback(tb,
					   txa_service_buffer_retry,
					   txa_retry);

	tqi->tx_buf = tb;
	tqi->added = 1;
	tqi->stopped = false;
	tdi->nb_queues++;
	txa->nb_queues++;

ret_unlock:
	rte_spinlock_unlock(&txa->tx_lock);
	return 0;

err_unlock:
	if (txa->nb_queues == 0) {
		txa_service_queue_array_free(txa,
					     eth_dev->data->port_id);
		txa_service_unregister(txa);
	}

	rte_spinlock_unlock(&txa->tx_lock);
	return -1;
}

static inline void
txa_txq_buffer_drain(struct txa_service_queue_info *tqi)
{
	struct rte_eth_dev_tx_buffer *b;
	uint16_t i;

	b = tqi->tx_buf;

	for (i = 0; i < b->length; i++)
		rte_pktmbuf_free(b->pkts[i]);

	b->length = 0;
}

static int
txa_service_queue_del(uint8_t id,
		      const struct rte_eth_dev *dev,
		      int32_t tx_queue_id)
{
	struct txa_service_data *txa;
	struct txa_service_queue_info *tqi;
	struct rte_eth_dev_tx_buffer *tb;
	uint16_t port_id;

	txa = txa_service_id_to_data(id);
	port_id = dev->data->port_id;

	if (tx_queue_id == -1) {
		uint16_t i, q, nb_queues;
		int ret = 0;

		if (txa->txa_ethdev == NULL)
			return 0;
		nb_queues = txa->txa_ethdev[port_id].nb_queues;
		if (nb_queues == 0)
			return 0;

		i = 0;
		q = 0;
		tqi = txa->txa_ethdev[port_id].queues;

		while (i < nb_queues) {

			if (tqi[q].added) {
				ret = txa_service_queue_del(id, dev, q);
				i++;
				if (ret != 0)
					break;
			}
			q++;
		}
		return ret;
	}

	txa = txa_service_id_to_data(id);

	rte_spinlock_lock(&txa->tx_lock);
	tqi = txa_service_queue(txa, port_id, tx_queue_id);
	if (tqi == NULL || !tqi->added)
		goto ret_unlock;

	/* Drain the buffered mbufs */
	txa_txq_buffer_drain(tqi);
	tb = tqi->tx_buf;
	tqi->added = 0;
	tqi->tx_buf = NULL;
	rte_free(tb);
	txa->nb_queues--;
	txa->txa_ethdev[port_id].nb_queues--;

	txa_service_queue_array_free(txa, port_id);

ret_unlock:
	rte_spinlock_unlock(&txa->tx_lock);
	return 0;
}

static int
txa_service_id_get(uint8_t id, uint32_t *service_id)
{
	struct txa_service_data *txa;

	txa = txa_service_id_to_data(id);
	if (txa->service_id == TXA_INVALID_SERVICE_ID)
		return -ESRCH;

	if (service_id == NULL)
		return -EINVAL;

	*service_id = txa->service_id;

	rte_eventdev_trace_eth_tx_adapter_service_id_get(id, *service_id);
	return 0;
}

static int
txa_service_start(uint8_t id)
{
	return txa_service_ctrl(id, 1);
}

static int
txa_service_stats_get(uint8_t id,
		      struct rte_event_eth_tx_adapter_stats *stats)
{
	struct txa_service_data *txa;

	txa = txa_service_id_to_data(id);
	*stats = txa->stats;
	return 0;
}

static int
txa_service_stats_reset(uint8_t id)
{
	struct txa_service_data *txa;

	txa = txa_service_id_to_data(id);
	memset(&txa->stats, 0, sizeof(txa->stats));
	return 0;
}

static int
txa_service_stop(uint8_t id)
{
	return txa_service_ctrl(id, 0);
}


int
rte_event_eth_tx_adapter_create(uint8_t id, uint8_t dev_id,
				struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	int ret;

	if (port_conf == NULL)
		return -EINVAL;

	RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	ret = txa_init();
	if (ret != 0)
		return ret;

	if (txa_adapter_exist(id))
		return -EEXIST;

	txa_dev_id_array[id] = dev_id;
	if (txa_dev_adapter_create(id))
		ret = txa_dev_adapter_create(id)(id, dev);

	if (ret != 0) {
		txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
		return ret;
	}

	ret = txa_service_adapter_create(id, dev, port_conf);
	if (ret != 0) {
		if (txa_dev_adapter_free(id))
			txa_dev_adapter_free(id)(id, dev);
		txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
		return ret;
	}
	rte_eventdev_trace_eth_tx_adapter_create(id, dev_id, NULL, port_conf,
						 ret);
	txa_dev_id_array[id] = dev_id;
	return 0;
}

int
rte_event_eth_tx_adapter_create_ext(uint8_t id, uint8_t dev_id,
				    rte_event_eth_tx_adapter_conf_cb conf_cb,
				    void *conf_arg)
{
	struct rte_eventdev *dev;
	int ret;

	RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	ret = txa_init();
	if (ret != 0)
		return ret;

	if (txa_adapter_exist(id))
		return -EINVAL;

	dev = &rte_eventdevs[dev_id];

	txa_dev_id_array[id] = dev_id;
	if (txa_dev_adapter_create_ext(id))
		ret = txa_dev_adapter_create_ext(id)(id, dev);

	if (ret != 0) {
		txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
		return ret;
	}

	ret = txa_service_adapter_create_ext(id, dev, conf_cb, conf_arg);
	if (ret != 0) {
		if (txa_dev_adapter_free(id))
			txa_dev_adapter_free(id)(id, dev);
		txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
		return ret;
	}

	rte_eventdev_trace_eth_tx_adapter_create(id, dev_id, conf_cb, conf_arg,
						 ret);
	txa_dev_id_array[id] = dev_id;
	return 0;
}


int
rte_event_eth_tx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
{
	rte_eventdev_trace_eth_tx_adapter_event_port_get(id);

	TXA_CHECK_OR_ERR_RET(id);

	return txa_service_event_port_get(id, event_port_id);
}

int
rte_event_eth_tx_adapter_free(uint8_t id)
{
	int ret;

	TXA_CHECK_OR_ERR_RET(id);

	ret = txa_dev_adapter_free(id) ?
		txa_dev_adapter_free(id)(id, txa_evdev(id)) :
		0;

	if (ret == 0)
		ret = txa_service_adapter_free(id);
	txa_dev_id_array[id] = TXA_INVALID_DEV_ID;

	rte_eventdev_trace_eth_tx_adapter_free(id, ret);
	return ret;
}

int
rte_event_eth_tx_adapter_queue_add(uint8_t id,
				   uint16_t eth_dev_id,
				   int32_t queue)
{
	struct rte_eth_dev *eth_dev;
	int ret;
	uint32_t caps;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
	TXA_CHECK_OR_ERR_RET(id);

	eth_dev = &rte_eth_devices[eth_dev_id];
	TXA_CHECK_TXQ(eth_dev, queue);

	caps = 0;
	if (txa_dev_caps_get(id))
		txa_dev_caps_get(id)(txa_evdev(id), eth_dev, &caps);

	if (caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)
		ret = txa_dev_queue_add(id) ?
			txa_dev_queue_add(id)(id,
					      txa_evdev(id),
					      eth_dev,
					      queue) : 0;
	else
		ret = txa_service_queue_add(id, txa_evdev(id), eth_dev, queue);

	rte_eventdev_trace_eth_tx_adapter_queue_add(id, eth_dev_id, queue,
						    ret);
	return ret;
}

int
rte_event_eth_tx_adapter_queue_del(uint8_t id,
				   uint16_t eth_dev_id,
				   int32_t queue)
{
	struct rte_eth_dev *eth_dev;
	int ret;
	uint32_t caps;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
	TXA_CHECK_OR_ERR_RET(id);

	eth_dev = &rte_eth_devices[eth_dev_id];

	caps = 0;

	if (txa_dev_caps_get(id))
		txa_dev_caps_get(id)(txa_evdev(id), eth_dev, &caps);

	if (caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)
		ret = txa_dev_queue_del(id) ?
			txa_dev_queue_del(id)(id, txa_evdev(id),
					      eth_dev,
					      queue) : 0;
	else
		ret = txa_service_queue_del(id, eth_dev, queue);

	rte_eventdev_trace_eth_tx_adapter_queue_del(id, eth_dev_id, queue,
						    ret);
	return ret;
}

int
rte_event_eth_tx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
{
	TXA_CHECK_OR_ERR_RET(id);

	return txa_service_id_get(id, service_id);
}

int
rte_event_eth_tx_adapter_start(uint8_t id)
{
	int ret;

	TXA_CHECK_OR_ERR_RET(id);

	ret = txa_dev_start(id) ? txa_dev_start(id)(id, txa_evdev(id)) : 0;
	if (ret == 0)
		ret = txa_service_start(id);
	rte_eventdev_trace_eth_tx_adapter_start(id, ret);
	return ret;
}

int
rte_event_eth_tx_adapter_stats_get(uint8_t id,
				   struct rte_event_eth_tx_adapter_stats *stats)
{
	int ret;

	TXA_CHECK_OR_ERR_RET(id);

	if (stats == NULL)
		return -EINVAL;

	*stats = (struct rte_event_eth_tx_adapter_stats){0};

	ret = txa_dev_stats_get(id) ?
			txa_dev_stats_get(id)(id, txa_evdev(id), stats) : 0;

	if (ret == 0 && txa_service_id_get(id, NULL) != ESRCH) {
		if (txa_dev_stats_get(id)) {
			struct rte_event_eth_tx_adapter_stats service_stats;

			ret = txa_service_stats_get(id, &service_stats);
			if (ret == 0) {
				stats->tx_retry += service_stats.tx_retry;
				stats->tx_packets += service_stats.tx_packets;
				stats->tx_dropped += service_stats.tx_dropped;
			}
		} else
			ret = txa_service_stats_get(id, stats);
	}

	rte_eventdev_trace_eth_tx_adapter_stats_get(id, stats->tx_retry,
						    stats->tx_packets,
						    stats->tx_dropped, ret);

	return ret;
}

int
rte_event_eth_tx_adapter_stats_reset(uint8_t id)
{
	int ret;

	TXA_CHECK_OR_ERR_RET(id);

	ret = txa_dev_stats_reset(id) ?
		txa_dev_stats_reset(id)(id, txa_evdev(id)) : 0;
	if (ret == 0)
		ret = txa_service_stats_reset(id);

	rte_eventdev_trace_eth_tx_adapter_stats_reset(id, ret);

	return ret;
}

int
rte_event_eth_tx_adapter_runtime_params_init(
		struct rte_event_eth_tx_adapter_runtime_params *txa_params)
{
	if (txa_params == NULL)
		return -EINVAL;

	memset(txa_params, 0, sizeof(*txa_params));
	txa_params->max_nb_tx = TXA_MAX_NB_TX;
	txa_params->flush_threshold = TXA_FLUSH_THRESHOLD;

	return 0;
}

static int
txa_caps_check(struct txa_service_data *txa)
{
	if (!txa->dev_count)
		return -EINVAL;

	if (txa->service_id != TXA_INVALID_SERVICE_ID)
		return 0;

	return -ENOTSUP;
}

int
rte_event_eth_tx_adapter_runtime_params_set(uint8_t id,
		struct rte_event_eth_tx_adapter_runtime_params *txa_params)
{
	struct txa_service_data *txa;
	int ret;

	if (txa_lookup())
		return -ENOMEM;

	TXA_CHECK_OR_ERR_RET(id);

	if (txa_params == NULL)
		return -EINVAL;

	txa = txa_service_id_to_data(id);
	if (txa == NULL)
		return -EINVAL;

	ret = txa_caps_check(txa);
	if (ret)
		return ret;

	rte_spinlock_lock(&txa->tx_lock);
	txa->flush_threshold = txa_params->flush_threshold;
	txa->max_nb_tx = txa_params->max_nb_tx;
	rte_spinlock_unlock(&txa->tx_lock);

	return 0;
}

int
rte_event_eth_tx_adapter_runtime_params_get(uint8_t id,
		struct rte_event_eth_tx_adapter_runtime_params *txa_params)
{
	struct txa_service_data *txa;
	int ret;

	if (txa_lookup())
		return -ENOMEM;

	TXA_CHECK_OR_ERR_RET(id);

	if (txa_params == NULL)
		return -EINVAL;

	txa = txa_service_id_to_data(id);
	if (txa == NULL)
		return -EINVAL;

	ret = txa_caps_check(txa);
	if (ret)
		return ret;

	rte_spinlock_lock(&txa->tx_lock);
	txa_params->flush_threshold = txa->flush_threshold;
	txa_params->max_nb_tx = txa->max_nb_tx;
	rte_spinlock_unlock(&txa->tx_lock);

	return 0;
}

int
rte_event_eth_tx_adapter_stop(uint8_t id)
{
	int ret;

	TXA_CHECK_OR_ERR_RET(id);

	ret = txa_dev_stop(id) ?
		txa_dev_stop(id)(id, txa_evdev(id)) : 0;
	if (ret == 0)
		ret = txa_service_stop(id);
	rte_eventdev_trace_eth_tx_adapter_stop(id, ret);
	return ret;
}

int
rte_event_eth_tx_adapter_instance_get(uint16_t eth_dev_id,
				      uint16_t tx_queue_id,
				      uint8_t *txa_inst_id)
{
	uint8_t id;
	int ret = -EINVAL;
	uint32_t caps;
	struct txa_service_data *txa;

	if (txa_lookup())
		return -ENOMEM;

	if (eth_dev_id >= rte_eth_dev_count_avail()) {
		RTE_EDEV_LOG_ERR("Invalid ethernet port id %u", eth_dev_id);
		return -EINVAL;
	}

	if (tx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_tx_queues) {
		RTE_EDEV_LOG_ERR("Invalid tx queue id %u", tx_queue_id);
		return -EINVAL;
	}

	if (txa_inst_id == NULL) {
		RTE_EDEV_LOG_ERR("txa_instance_id cannot be NULL");
		return -EINVAL;
	}

	/* Iterate through all Tx adapter instances */
	for (id = 0; id < RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE; id++) {
		txa = txa_service_id_to_data(id);
		if (!txa)
			continue;

		caps = 0;
		if (rte_event_eth_tx_adapter_caps_get(txa->eventdev_id,
						      eth_dev_id,
						      &caps))
			continue;

		if (caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT) {
			ret = txa_dev_instance_get(id) ?
					txa_dev_instance_get(id)(eth_dev_id,
								 tx_queue_id,
								 txa_inst_id)
					: -EINVAL;
			if (ret == 0) {
				rte_eventdev_trace_eth_tx_adapter_instance_get(
					eth_dev_id, tx_queue_id, *txa_inst_id);
				return ret;
			}
		} else {
			struct rte_eth_dev *eth_dev;

			eth_dev = &rte_eth_devices[eth_dev_id];

			if (txa_service_is_queue_added(txa, eth_dev,
						       tx_queue_id)) {
				*txa_inst_id = txa->id;
				rte_eventdev_trace_eth_tx_adapter_instance_get(
					eth_dev_id, tx_queue_id, *txa_inst_id);
				return 0;
			}
		}
	}

	return -EINVAL;
}

static inline int
txa_sw_queue_start_state_set(uint16_t eth_dev_id, uint16_t tx_queue_id,
			     bool start_state, struct txa_service_data *txa)
{
	struct txa_service_queue_info *tqi = NULL;

	rte_spinlock_lock(&txa->tx_lock);
	tqi = txa_service_queue(txa, eth_dev_id, tx_queue_id);
	if (unlikely(tqi == NULL || !tqi->added)) {
		rte_spinlock_unlock(&txa->tx_lock);
		return -EINVAL;
	}
	if (start_state == false)
		txa_txq_buffer_drain(tqi);

	tqi->stopped = !start_state;
	rte_spinlock_unlock(&txa->tx_lock);
	return 0;
}

static int
txa_queue_start_state_set(uint16_t eth_dev_id, uint16_t tx_queue_id,
			  bool start_state)
{
	struct txa_service_data *txa;
	uint8_t txa_inst_id;
	int ret;
	uint32_t caps = 0;

	/* Below API already does validation of input parameters.
	 * Hence skipping the validation here.
	 */
	ret = rte_event_eth_tx_adapter_instance_get(eth_dev_id,
						    tx_queue_id,
						    &txa_inst_id);
	if (ret < 0)
		return -EINVAL;

	txa = txa_service_id_to_data(txa_inst_id);
	ret = rte_event_eth_tx_adapter_caps_get(txa->eventdev_id,
						eth_dev_id,
						&caps);
	if (ret < 0)
		return -EINVAL;

	if (caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT) {
		if (start_state == true) {
			ret = txa_dev_queue_start(txa_inst_id) ?
				txa_dev_queue_start(txa_inst_id)(txa_inst_id,
								 eth_dev_id,
								 tx_queue_id) : 0;
		} else {
			ret = txa_dev_queue_stop(txa_inst_id) ?
				txa_dev_queue_stop(txa_inst_id)(txa_inst_id,
								eth_dev_id,
								tx_queue_id) : 0;
		}
		return ret;
	}

	return txa_sw_queue_start_state_set(eth_dev_id, tx_queue_id,
					    start_state, txa);
}

int
rte_event_eth_tx_adapter_queue_start(uint16_t eth_dev_id, uint16_t tx_queue_id)
{
	rte_eventdev_trace_eth_tx_adapter_queue_start(eth_dev_id, tx_queue_id);

	return txa_queue_start_state_set(eth_dev_id, tx_queue_id, true);
}

int
rte_event_eth_tx_adapter_queue_stop(uint16_t eth_dev_id, uint16_t tx_queue_id)
{
	rte_eventdev_trace_eth_tx_adapter_queue_stop(eth_dev_id, tx_queue_id);

	return txa_queue_start_state_set(eth_dev_id, tx_queue_id, false);
}
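
/* Illustrative usage sketch (kept as a comment so it does not affect the
 * build): how an application might wire up this adapter when the event
 * device lacks RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT and the
 * service-based data path above is used. The adapter id, ethdev port,
 * Tx queue and service lcore numbers below are assumptions for the
 * example, not values mandated by this library.
 *
 *	uint8_t txa_id = 0, evdev_id = 0;
 *	uint16_t eth_port = 0;
 *	uint32_t service_id;
 *	struct rte_event_port_conf pconf = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 32,
 *		.enqueue_depth = 32,
 *	};
 *
 *	// Create the adapter and add all Tx queues of the port (-1).
 *	rte_event_eth_tx_adapter_create(txa_id, evdev_id, &pconf);
 *	rte_event_eth_tx_adapter_queue_add(txa_id, eth_port, -1);
 *
 *	// Map the adapter's service to a service lcore and start it;
 *	// lcore 1 is an assumption for this sketch.
 *	if (rte_event_eth_tx_adapter_service_id_get(txa_id,
 *						    &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, 1, 1);
 *		rte_service_lcore_start(1);
 *	}
 *	rte_event_eth_tx_adapter_start(txa_id);
 *
 *	// On the worker path, tag each mbuf with its Tx queue and enqueue
 *	// the event to the adapter's event port.
 *	rte_event_eth_tx_adapter_txq_set(mbuf, 0);
 */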