/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_log.h>
#include <dev_driver.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <ethdev_driver.h>
#include <rte_cryptodev.h>
#include <rte_dmadev.h>
#include <cryptodev_pmd.h>
#include <rte_telemetry.h>

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "eventdev_trace.h"

static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = rte_event_devices;

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs = 0
};

/* Public fastpath APIs. */
struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];

/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return eventdev_globals.nb_devs;
}

int
rte_event_dev_get_dev_id(const char *name)
{
	int i;
	uint8_t cmp;

	if (!name)
		return -EINVAL;

	for (i = 0; i < eventdev_globals.nb_devs; i++) {
		cmp = (strncmp(rte_event_devices[i].data->name, name,
			RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
			(rte_event_devices[i].dev ? (strncmp(
				rte_event_devices[i].dev->driver->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
		if (cmp && (rte_event_devices[i].attached ==
				RTE_EVENTDEV_ATTACHED)) {
			rte_eventdev_trace_get_dev_id(name, i);
			return i;
		}
	}
	return -ENODEV;
}

int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_eventdev_trace_socket_id(dev_id, dev, dev->data->socket_id);

	return dev->data->socket_id;
}

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->dev = dev->dev;
	if (dev->dev != NULL && dev->dev->driver != NULL)
		dev_info->driver_name = dev->dev->driver->name;

	rte_eventdev_trace_info_get(dev_id, dev_info, dev_info->dev);

	return 0;
}

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;

	rte_eventdev_trace_eth_rx_adapter_caps_get(dev_id, eth_port_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = 0;

	return dev->dev_ops->eth_rx_adapter_caps_get ?
			(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
					&rte_eth_devices[eth_port_id],
					caps)
			: 0;
}

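/*
 * Illustrative usage sketch (not part of the library sources): an application
 * can query the Rx adapter capabilities before creating an adapter and branch
 * on whether the PMD provides an internal port. "ev_dev_id" and "eth_port"
 * below are placeholders.
 *
 *	uint32_t caps;
 *
 *	if (rte_event_eth_rx_adapter_caps_get(ev_dev_id, eth_port, &caps) == 0 &&
 *	    (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
 *		// events are injected by the hardware, no service core needed
 *	} else {
 *		// SW adapter path: a service core polls the ethdev Rx queues
 *	}
 */
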
int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
{
	struct rte_eventdev *dev;
	const struct event_timer_adapter_ops *ops;

	rte_eventdev_trace_timer_adapter_caps_get(dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->timer_adapter_caps_get == NULL)
		*caps = RTE_EVENT_TIMER_ADAPTER_SW_CAP;
	else
		*caps = 0;

	return dev->dev_ops->timer_adapter_caps_get ?
			(*dev->dev_ops->timer_adapter_caps_get)(dev,
					0,
					caps,
					&ops)
			: 0;
}

int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
				  uint32_t *caps)
{
	struct rte_eventdev *dev;
	struct rte_cryptodev *cdev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (!rte_cryptodev_is_valid_dev(cdev_id))
		return -EINVAL;

	dev = &rte_eventdevs[dev_id];
	cdev = rte_cryptodev_pmd_get_dev(cdev_id);

	rte_eventdev_trace_crypto_adapter_caps_get(dev_id, dev, cdev_id, cdev);

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->crypto_adapter_caps_get == NULL)
		*caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
	else
		*caps = 0;

	return dev->dev_ops->crypto_adapter_caps_get ?
		(*dev->dev_ops->crypto_adapter_caps_get)
		(dev, cdev, caps) : 0;
}

int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;
	struct rte_eth_dev *eth_dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];
	eth_dev = &rte_eth_devices[eth_port_id];

	rte_eventdev_trace_eth_tx_adapter_caps_get(dev_id, dev, eth_port_id, eth_dev);

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
	else
		*caps = 0;

	return dev->dev_ops->eth_tx_adapter_caps_get ?
			(*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
					eth_dev,
					caps)
			: 0;
}

int
rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dma_dev_id, uint32_t *caps)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (!rte_dma_is_valid(dma_dev_id))
		return -EINVAL;

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;

	*caps = 0;

	if (dev->dev_ops->dma_adapter_caps_get)
		return (*dev->dev_ops->dma_adapter_caps_get)(dev, dma_dev_id, caps);

	return 0;
}

static inline int
event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	struct rte_event_queue_conf *queues_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			   dev->data->dev_id);

	if (nb_queues != 0) {
		queues_cfg = dev->data->queues_cfg;
		if (*dev->dev_ops->queue_release == NULL)
			return -ENOTSUP;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);


		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_cfg + old_nb_queues, 0,
				sizeof(queues_cfg[0]) * new_qs);
		}
	} else {
		if (*dev->dev_ops->queue_release == NULL)
			return -ENOTSUP;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}

#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)

static inline int
event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint16_t *links_map;
	struct rte_event_port_conf *ports_cfg;
	unsigned int i, j;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			   dev->data->dev_id);

	if (nb_ports != 0) { /* re-config */
		if (*dev->dev_ops->port_release == NULL)
			return -ENOTSUP;

		ports = dev->data->ports;
		ports_cfg = dev->data->ports_cfg;

		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;
			unsigned int old_links_map_end =
				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
			unsigned int links_map_end =
				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_cfg + old_nb_ports, 0,
				sizeof(ports_cfg[0]) * new_ps);
			for (i = 0; i < RTE_EVENT_MAX_PROFILES_PER_PORT; i++) {
				links_map = dev->data->links_map[i];
				for (j = old_links_map_end; j < links_map_end; j++)
					links_map[j] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
			}
		}
	} else {
		if (*dev->dev_ops->port_release == NULL)
			return -ENOTSUP;

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++) {
			(*dev->dev_ops->port_release)(ports[i]);
			ports[i] = NULL;
		}
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}

int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_event_dev_info info;
	struct rte_eventdev *dev;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns &&
		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				info.max_dequeue_timeout_ns)) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues +
			info.max_single_link_event_port_queue_pairs) {
		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
				 dev_id, dev_conf->nb_event_queues,
				 info.max_event_queues,
				 info.max_single_link_event_port_queue_pairs);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues -
			dev_conf->nb_single_link_event_port_queues >
			info.max_event_queues) {
		RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
				 dev_id, dev_conf->nb_event_queues,
				 dev_conf->nb_single_link_event_port_queues,
				 info.max_event_queues);
		return -EINVAL;
	}
	if (dev_conf->nb_single_link_event_port_queues >
			dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
				 dev_id,
				 dev_conf->nb_single_link_event_port_queues,
				 dev_conf->nb_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports +
			info.max_single_link_event_port_queue_pairs) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
				 dev_id, dev_conf->nb_event_ports,
				 info.max_event_ports,
				 info.max_single_link_event_port_queue_pairs);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports -
			dev_conf->nb_single_link_event_port_queues
			> info.max_event_ports) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
				 dev_id, dev_conf->nb_event_ports,
				 dev_conf->nb_single_link_event_port_queues,
				 info.max_event_ports);
		return -EINVAL;
	}

	if (dev_conf->nb_single_link_event_port_queues >
	    dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR(
			"dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
			dev_id,
			dev_conf->nb_single_link_event_port_queues,
			dev_conf->nb_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_dequeue_depth >
			info.max_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_enqueue_depth >
			info.max_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
				 diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
				 diag);
		return diag;
	}

	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
		event_dev_queue_config(dev, 0);
		event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	rte_eventdev_trace_configure(dev_id, dev_conf, diag);
	return diag;
}

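/*
 * Illustrative configuration sketch (not part of the library sources). The
 * values below are placeholders; a real application derives its limits from
 * rte_event_dev_info_get() exactly as the checks above do, and handle_error()
 * stands in for application-specific error handling.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.nb_event_queues = 2;
 *	cfg.nb_event_ports = 2;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *		handle_error();
 */
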
static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->queue_def_conf == NULL)
		return -ENOTSUP;
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);

	rte_eventdev_trace_queue_default_conf_get(dev_id, dev, queue_id, queue_conf);

	return 0;
}

static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ATOMIC)
		))
		return 1;
	else
		return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ORDERED)
		))
		return 1;
	else
		return 0;
}


int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
			"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
			"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->queue_setup == NULL)
		return -ENOTSUP;

	if (queue_conf == NULL) {
		if (*dev->dev_ops->queue_def_conf == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	dev->data->queues_cfg[queue_id] = *queue_conf;
	rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}

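/*
 * Illustrative queue setup sketch (not part of the library sources): start
 * from the PMD's default configuration and override only what is needed.
 * All identifiers, including handle_error(), are placeholders.
 *
 *	struct rte_event_queue_conf qconf;
 *
 *	rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
 *	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	qconf.nb_atomic_flows = 1024;	// must not exceed nb_event_queue_flows
 *	if (rte_event_queue_setup(dev_id, queue_id, &qconf) < 0)
 *		handle_error();
 *
 * Passing queue_conf == NULL requests the default configuration, as handled
 * at the end of rte_event_queue_setup() above.
 */
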
static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->port_def_conf == NULL)
		return -ENOTSUP;
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);

	rte_eventdev_trace_port_default_conf_get(dev_id, dev, port_id, port_conf);

	return 0;
}

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
			dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
			dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (port_conf &&
	    (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
	    !(dev->data->event_dev_cap &
	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Implicit release disable not supported",
			dev_id, port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->port_setup == NULL)
		return -ENOTSUP;

	if (port_conf == NULL) {
		if (*dev->dev_ops->port_def_conf == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_cfg[port_id] = *port_conf;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port(default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
	if (diag < 0)
		return diag;

	return 0;
}

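/*
 * Illustrative port setup sketch (not part of the library sources): as with
 * queues, the usual pattern is default conf plus overrides. All identifiers,
 * including handle_error(), are placeholders.
 *
 *	struct rte_event_port_conf pconf;
 *
 *	rte_event_port_default_conf_get(dev_id, port_id, &pconf);
 *	pconf.new_event_threshold = 1024;	// <= nb_events_limit
 *	if (rte_event_port_setup(dev_id, port_id, &pconf) < 0)
 *		handle_error();
 *
 * Note that a freshly set up port has no queue links; they must be added
 * afterwards with rte_event_port_link(), shown further below.
 */
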
void
rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
		       rte_eventdev_port_flush_t release_cb, void *args)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];

	rte_eventdev_trace_port_quiesce(dev_id, dev, port_id, args);

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return;
	}

	if (dev->dev_ops->port_quiesce)
		(*dev->dev_ops->port_quiesce)(dev, dev->data->ports[port_id],
					      release_cb, args);
}

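/*
 * Illustrative quiesce sketch (not part of the library sources), assuming the
 * rte_eventdev_port_flush_t prototype from rte_eventdev.h and that the events
 * carry mbufs; both are assumptions of this example only.
 *
 *	static void
 *	port_flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
 *	{
 *		rte_pktmbuf_free(ev.mbuf);	// reclaim the flushed event
 *	}
 *
 *	// on a worker that is about to stop polling its port:
 *	rte_event_port_quiesce(dev_id, port_id, port_flush_cb, NULL);
 */
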
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	switch (attr_id) {
	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
		*attr_value = dev->data->nb_ports;
		break;
	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
		*attr_value = dev->data->nb_queues;
		break;
	case RTE_EVENT_DEV_ATTR_STARTED:
		*attr_value = dev->data->dev_started;
		break;
	default:
		return -EINVAL;
	}

	rte_eventdev_trace_attr_get(dev_id, dev, attr_id, *attr_value);

	return 0;
}

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	switch (attr_id) {
	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
		break;
	case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
	{
		uint32_t config;

		config = dev->data->ports_cfg[port_id].event_port_cfg;
		*attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
		break;
	}
	default:
		return -EINVAL;
	};

	rte_eventdev_trace_port_attr_get(dev_id, dev, port_id, attr_id, *attr_value);

	return 0;
}

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			 uint32_t *attr_value)
{
	struct rte_event_queue_conf *conf;
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	conf = &dev->data->queues_cfg[queue_id];

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->priority;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
		*attr_value = conf->nb_atomic_flows;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
		*attr_value = conf->nb_atomic_order_sequences;
		break;
	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
		*attr_value = conf->event_queue_cfg;
		break;
	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
			return -EOVERFLOW;

		*attr_value = conf->schedule_type;
		break;
	case RTE_EVENT_QUEUE_ATTR_WEIGHT:
		*attr_value = RTE_EVENT_QUEUE_WEIGHT_LOWEST;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->weight;
		break;
	case RTE_EVENT_QUEUE_ATTR_AFFINITY:
		*attr_value = RTE_EVENT_QUEUE_AFFINITY_LOWEST;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->affinity;
		break;
	default:
		return -EINVAL;
	};

	rte_eventdev_trace_queue_attr_get(dev_id, dev, queue_id, attr_id, *attr_value);

	return 0;
}

int
rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			 uint64_t attr_value)
{
	struct rte_eventdev *dev;

	rte_eventdev_trace_queue_attr_set(dev_id, queue_id, attr_id, attr_value);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	if (!(dev->data->event_dev_cap &
	      RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR)) {
		RTE_EDEV_LOG_ERR(
			"Device %" PRIu8 " does not support changing queue attributes at runtime",
			dev_id);
		return -ENOTSUP;
	}

	if (*dev->dev_ops->queue_attr_set == NULL)
		return -ENOTSUP;
	return (*dev->dev_ops->queue_attr_set)(dev, queue_id, attr_id,
					       attr_value);
}

int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	return rte_event_port_profile_links_set(dev_id, port_id, queues, priorities, nb_links, 0);
}

int
rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
				 const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id)
{
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	struct rte_event_dev_info info;
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;

	(*dev->dev_ops->dev_infos_get)(dev, &info);
	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT ||
	    profile_id >= info.max_profiles_per_port) {
		RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->port_link == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (profile_id && *dev->dev_ops->port_link_profile == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = EINVAL;
		return 0;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;

		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = EINVAL;
			return 0;
		}

	if (profile_id)
		diag = (*dev->dev_ops->port_link_profile)(dev, dev->data->ports[port_id], queues,
							  priorities, nb_links, profile_id);
	else
		diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id], queues,
						  priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map[profile_id];
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	rte_eventdev_trace_port_profile_links_set(dev_id, port_id, nb_links, profile_id, diag);
	return diag;
}

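/*
 * Illustrative link sketch (not part of the library sources): link one worker
 * port to two queues with explicit priorities. Passing NULL for queues and
 * priorities instead links the port to every configured queue at normal
 * priority, as implemented above. Identifiers are placeholders.
 *
 *	const uint8_t queues[] = {0, 1};
 *	const uint8_t prios[] = {RTE_EVENT_DEV_PRIORITY_HIGHEST,
 *				 RTE_EVENT_DEV_PRIORITY_NORMAL};
 *
 *	if (rte_event_port_link(dev_id, port_id, queues, prios, 2) != 2)
 *		handle_error();	// rte_errno holds the reason
 */
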
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	return rte_event_port_profile_unlink(dev_id, port_id, queues, nb_unlinks, 0);
}

int
rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
			      uint16_t nb_unlinks, uint8_t profile_id)
{
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	struct rte_event_dev_info info;
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, diag, j;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;

	(*dev->dev_ops->dev_infos_get)(dev, &info);
	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT ||
	    profile_id >= info.max_profiles_per_port) {
		RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->port_unlink == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (profile_id && *dev->dev_ops->port_unlink_profile == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = EINVAL;
		return 0;
	}

	links_map = dev->data->links_map[profile_id];
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);

	if (queues == NULL) {
		j = 0;
		for (i = 0; i < dev->data->nb_queues; i++) {
			if (links_map[i] !=
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
				all_queues[j] = i;
				j++;
			}
		}
		queues = all_queues;
	} else {
		for (j = 0; j < nb_unlinks; j++) {
			if (links_map[queues[j]] ==
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
				break;
		}
	}

	nb_unlinks = j;
	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = EINVAL;
			return 0;
		}

	if (profile_id)
		diag = (*dev->dev_ops->port_unlink_profile)(dev, dev->data->ports[port_id], queues,
							    nb_unlinks, profile_id);
	else
		diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id], queues,
						    nb_unlinks);
	if (diag < 0)
		return diag;

	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	rte_eventdev_trace_port_profile_unlink(dev_id, port_id, nb_unlinks, profile_id, diag);
	return diag;
}

int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
{
	struct rte_eventdev *dev;

	rte_eventdev_trace_port_unlinks_in_progress(dev_id, port_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Return 0 if the PMD does not implement unlinks in progress.
	 * This allows PMDs which handle unlink synchronously to not implement
	 * this function at all.
	 */
	if (*dev->dev_ops->port_unlinks_in_progress == NULL)
		return 0;

	return (*dev->dev_ops->port_unlinks_in_progress)(dev,
			dev->data->ports[port_id]);
}

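/*
 * Illustrative unlink sketch (not part of the library sources): unlinks may
 * complete asynchronously, so a teardown path typically unlinks and then
 * polls the in-progress counter before releasing per-queue resources.
 * Identifiers are placeholders.
 *
 *	rte_event_port_unlink(dev_id, port_id, NULL, 0);	// unlink all queues
 *	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *		rte_pause();
 */
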
int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Use the default profile_id. */
	links_map = dev->data->links_map[0];
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}

	rte_eventdev_trace_port_links_get(dev_id, port_id, count);

	return count;
}

int
rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
				 uint8_t priorities[], uint8_t profile_id)
{
	struct rte_event_dev_info info;
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;

	(*dev->dev_ops->dev_infos_get)(dev, &info);
	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT ||
	    profile_id >= info.max_profiles_per_port) {
		RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id);
		return -EINVAL;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map[profile_id];
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}

	rte_eventdev_trace_port_profile_links_get(dev_id, port_id, profile_id, count);

	return count;
}

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	rte_eventdev_trace_dequeue_timeout_ticks(dev_id, ns, timeout_ticks);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->timeout_ticks == NULL)
		return -ENOTSUP;

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}

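/*
 * Illustrative timeout conversion sketch (not part of the library sources):
 * convert a wall-clock timeout to device ticks once, then reuse the value on
 * the dequeue fast path. Identifiers are placeholders.
 *
 *	uint64_t ticks;
 *	struct rte_event ev;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) < 0)
 *		ticks = 0;	// fall back to a non-blocking dequeue
 *	(void)rte_event_dequeue_burst(dev_id, port_id, &ev, 1, ticks);
 */
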
int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (service_id == NULL)
		return -EINVAL;

	if (dev->data->service_inited)
		*service_id = dev->data->service_id;

	rte_eventdev_trace_service_id_get(dev_id, *service_id);

	return dev->data->service_inited ? 0 : -ESRCH;
}

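/*
 * Illustrative service-core sketch (not part of the library sources):
 * software event devices expose their scheduler as a service, which an
 * application maps to a service lcore and enables. "service_lcore" is a
 * placeholder.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, service_lcore, 1);
 *		rte_service_runstate_set(service_id, 1);
 *	}
 */
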
int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->dump == NULL)
		return -ENOTSUP;
	if (f == NULL)
		return -EINVAL;

	(*dev->dev_ops->dump)(dev, f);
	return 0;

}

static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		 uint8_t queue_port_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
							 queue_port_id,
							 NULL, NULL, 0);
	return 0;
}

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		uint64_t *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							  queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
			queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}

/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const uint64_t ids[],
		uint64_t values[], unsigned int n)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
						   ids, values, n);
	return -ENOTSUP;
}

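/*
 * Illustrative xstats sketch (not part of the library sources): the names/ids
 * call is made twice, first with NULL arrays to learn the count, then to fill
 * the caller's buffers, exactly as eventdev_build_telemetry_data() does
 * further down. "names", "ids" and "values" are placeholder buffers of at
 * least n entries each.
 *
 *	int n = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *	// allocate n entries for names, ids and values, then:
 *	rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				       names, ids, n);
 *	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				 ids, values, n);
 */
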
uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
				 uint64_t *id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	uint64_t temp = -1;

	if (id != NULL)
		*id = (unsigned int)-1;
	else
		id = &temp; /* ensure driver never gets a NULL value */

	/* implemented by driver */
	if (dev->dev_ops->xstats_get_by_name != NULL)
		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
	return -ENOTSUP;
}

int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint64_t ids[], uint32_t nb_ids)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_reset != NULL)
		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
						     ids, nb_ids);
	return -ENOTSUP;
}

int rte_event_pmd_selftest_seqn_dynfield_offset = -1;

int rte_event_dev_selftest(uint8_t dev_id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
		.name = "rte_event_pmd_selftest_seqn_dynfield",
		.size = sizeof(rte_event_pmd_selftest_seqn_t),
		.align = __alignof__(rte_event_pmd_selftest_seqn_t),
	};
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->dev_selftest != NULL) {
		rte_event_pmd_selftest_seqn_dynfield_offset =
			rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
		if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
			return -ENOMEM;
		return (*dev->dev_ops->dev_selftest)();
	}
	return -ENOTSUP;
}

struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
			     unsigned int cache_size, uint16_t nb_elem,
			     int socket_id)
{
	const char *mp_ops_name;
	struct rte_mempool *mp;
	unsigned int elt_sz;
	int ret;

	if (!nb_elem) {
		RTE_LOG(ERR, EVENTDEV,
			"Invalid number of elements=%d requested\n", nb_elem);
		rte_errno = EINVAL;
		return NULL;
	}

	elt_sz =
		sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
	mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
				      0);
	if (mp == NULL)
		return NULL;

	mp_ops_name = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
		goto err;
	}

	ret = rte_mempool_populate_default(mp);
	if (ret < 0)
		goto err;

	rte_eventdev_trace_vector_pool_create(mp, mp->name, mp->socket_id,
		mp->size, mp->cache_size, mp->elt_size);

	return mp;
err:
	rte_mempool_free(mp);
	rte_errno = -ret;
	return NULL;
}

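/*
 * Illustrative vector pool sketch (not part of the library sources): the pool
 * name and sizes are placeholders chosen for the example only; the fourth
 * argument (32) is nb_elem, the number of events carried per vector.
 *
 *	struct rte_mempool *vp;
 *
 *	vp = rte_event_vector_pool_create("ev_vec_pool", 16 * 1024, 64,
 *					  32, rte_socket_id());
 *	if (vp == NULL)
 *		handle_error();	// rte_errno holds the reason
 */
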
int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
				 dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	rte_eventdev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);

	return 0;
}

int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
		rte_eventdev_stop_flush_t callback,
		void *userdata)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);

	rte_eventdev_trace_stop_flush_callback_register(dev_id, callback, userdata);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	dev->dev_ops->dev_stop_flush = callback;
	dev->data->dev_stop_flush_arg = userdata;

	return 0;
}

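/*
 * Illustrative stop-flush sketch (not part of the library sources): events
 * still inside the device when rte_event_dev_stop() is called are handed to
 * the registered callback so their resources can be reclaimed. The mbuf
 * handling below is an assumption about the application's event payload.
 *
 *	static void
 *	stop_flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
 *	{
 *		rte_pktmbuf_free(ev.mbuf);
 *	}
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, stop_flush_cb, NULL);
 */
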
void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->dev_stop == NULL)
		return;

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
				 dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
	rte_eventdev_trace_stop(dev_id);
	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
}

int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				 dev_id);
		return -EBUSY;
	}

	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
	rte_eventdev_trace_close(dev_id);
	return (*dev->dev_ops->dev_close)(dev);
}

static inline int
eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		    int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int i, n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		memset(*data, 0, sizeof(struct rte_eventdev_data));
		for (i = 0; i < RTE_EVENT_MAX_PROFILES_PER_PORT; i++)
			for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV * RTE_EVENT_MAX_QUEUES_PER_DEV;
			     n++)
				(*data)->links_map[i][n] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
	}

	return 0;
}

static inline uint8_t
eventdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}

struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already "
				 "allocated!", name);
		return NULL;
	}

	dev_id = eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval =
			eventdev_data_alloc(dev_id, &eventdev_data, socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {

			strlcpy(eventdev->data->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN);

			eventdev->data->dev_id = dev_id;
			eventdev->data->socket_id = socket_id;
			eventdev->data->dev_started = 0;
		}

		eventdev->attached = RTE_EVENTDEV_ATTACHED;
		eventdev_globals.nb_devs++;
	}

	return eventdev;
}

int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;
	return 0;
}

void
event_dev_probing_finish(struct rte_eventdev *eventdev)
{
	if (eventdev == NULL)
		return;

	event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
			     eventdev);
}

static int
handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	uint8_t dev_id;
	int ndev = rte_event_dev_count();

	if (ndev < 1)
		return -1;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_ATTACHED)
			rte_tel_data_add_array_int(d, dev_id);
	}

	return 0;
}

static int
handle_port_list(const char *cmd __rte_unused,
		 const char *params,
		 struct rte_tel_data *d)
{
	int i;
	uint8_t dev_id;
	struct rte_eventdev *dev;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (i = 0; i < dev->data->nb_ports; i++)
		rte_tel_data_add_array_int(d, i);

	return 0;
}

static int
handle_queue_list(const char *cmd __rte_unused,
		  const char *params,
		  struct rte_tel_data *d)
{
	int i;
	uint8_t dev_id;
	struct rte_eventdev *dev;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (i = 0; i < dev->data->nb_queues; i++)
		rte_tel_data_add_array_int(d, i);

	return 0;
}

static int
handle_queue_links(const char *cmd __rte_unused,
		   const char *params,
		   struct rte_tel_data *d)
{
	int i, ret, port_id = 0;
	char *end_param;
	uint8_t dev_id;
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_id = strtoul(p_param, &end_param, 10);
	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	for (i = 0; i < ret; i++) {
		char qid_name[32];

		snprintf(qid_name, 31, "qid_%u", queues[i]);
		rte_tel_data_add_dict_uint(d, qid_name, priorities[i]);
	}

	return 0;
}

static int
eventdev_build_telemetry_data(int dev_id,
			      enum rte_event_dev_xstats_mode mode,
			      int port_queue_id,
			      struct rte_tel_data *d)
{
	struct rte_event_dev_xstats_name *xstat_names;
	uint64_t *ids;
	uint64_t *values;
	int i, ret, num_xstats;

	num_xstats = rte_event_dev_xstats_names_get(dev_id,
						    mode,
						    port_queue_id,
						    NULL,
						    NULL,
						    0);

	if (num_xstats < 0)
		return -1;

	/* use one malloc for names */
	xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
			     * num_xstats);
	if (xstat_names == NULL)
		return -1;

	ids = malloc((sizeof(uint64_t)) * num_xstats);
	if (ids == NULL) {
		free(xstat_names);
		return -1;
	}

	values = malloc((sizeof(uint64_t)) * num_xstats);
	if (values == NULL) {
		free(xstat_names);
		free(ids);
		return -1;
	}

	ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
					     xstat_names, ids, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(xstat_names);
		free(ids);
		free(values);
		return -1;
	}

	ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
				       ids, values, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(xstat_names);
		free(ids);
		free(values);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_uint(d, xstat_names[i].name, values[i]);

	free(xstat_names);
	free(ids);
	free(values);
	return 0;
}

static int
handle_dev_xstats(const char *cmd __rte_unused,
		  const char *params,
		  struct rte_tel_data *d)
{
	int dev_id;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	mode = RTE_EVENT_DEV_XSTATS_DEVICE;
	return eventdev_build_telemetry_data(dev_id, mode, 0, d);
}

static int
handle_port_xstats(const char *cmd __rte_unused,
		   const char *params,
		   struct rte_tel_data *d)
{
	int dev_id;
	int port_queue_id = 0;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	mode = RTE_EVENT_DEV_XSTATS_PORT;

	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_queue_id = strtoul(p_param, &end_param, 10);

	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}

static int
handle_queue_xstats(const char *cmd __rte_unused,
		    const char *params,
		    struct rte_tel_data *d)
{
	int dev_id;
	int port_queue_id = 0;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	mode = RTE_EVENT_DEV_XSTATS_QUEUE;

	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_queue_id = strtoul(p_param, &end_param, 10);

	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}

static int
handle_dev_dump(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	char *buf, *end_param;
	int dev_id, ret;
	FILE *f;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	buf = calloc(sizeof(char), RTE_TEL_MAX_SINGLE_STRING_LEN);
	if (buf == NULL)
		return -ENOMEM;

	f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+");
	if (f == NULL) {
		free(buf);
		return -EINVAL;
	}

	ret = rte_event_dev_dump(dev_id, f);
	fclose(f);
	if (ret == 0) {
		rte_tel_data_start_dict(d);
		rte_tel_data_string(d, buf);
	}

	free(buf);
	return ret;
}

RTE_INIT(eventdev_init_telemetry)
{
	rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
			"Returns list of available eventdevs. Takes no parameters");
	rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
			"Returns list of available ports. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
			"Returns list of available queues. Parameter: DevID");

	rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
			"Returns stats for an eventdev. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
			"Returns stats for an eventdev port. Params: DevID,PortID");
	rte_telemetry_register_cmd("/eventdev/queue_xstats",
			handle_queue_xstats,
			"Returns stats for an eventdev queue. Params: DevID,QueueID");
	rte_telemetry_register_cmd("/eventdev/dev_dump", handle_dev_dump,
			"Returns dump information for an eventdev. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
			"Returns links for an eventdev port. Params: DevID,PortID");
}
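
/*
 * Illustrative telemetry usage (not part of the library sources): the
 * commands registered above can be exercised with the standard telemetry
 * client, e.g. usertools/dpdk-telemetry.py. Device and port numbers below
 * are placeholders.
 *
 *	--> /eventdev/dev_list
 *	--> /eventdev/port_list,0
 *	--> /eventdev/dev_xstats,0
 *	--> /eventdev/queue_links,0,1
 */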