/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_log.h>
#include <dev_driver.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <ethdev_driver.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_telemetry.h>

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "eventdev_trace.h"

static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = rte_event_devices;

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs = 0
};

/* Public fastpath APIs. */
struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];

/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return eventdev_globals.nb_devs;
}

int
rte_event_dev_get_dev_id(const char *name)
{
	int i;
	uint8_t cmp;

	if (!name)
		return -EINVAL;

	for (i = 0; i < eventdev_globals.nb_devs; i++) {
		cmp = (strncmp(rte_event_devices[i].data->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
			(rte_event_devices[i].dev ? (strncmp(
				rte_event_devices[i].dev->driver->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
		if (cmp && (rte_event_devices[i].attached ==
					RTE_EVENTDEV_ATTACHED))
			return i;
	}
	return -ENODEV;
}

int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	return dev->data->socket_id;
}

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->dev = dev->dev;
	return 0;
}

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				  uint32_t *caps)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = 0;

	return dev->dev_ops->eth_rx_adapter_caps_get ?
			(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
						&rte_eth_devices[eth_port_id],
						caps)
			: 0;
}

int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
{
	struct rte_eventdev *dev;
	const struct event_timer_adapter_ops *ops;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;
	*caps = 0;

	return dev->dev_ops->timer_adapter_caps_get ?
			(*dev->dev_ops->timer_adapter_caps_get)(dev,
								0,
								caps,
								&ops)
			: 0;
}

int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
				  uint32_t *caps)
{
	struct rte_eventdev *dev;
	struct rte_cryptodev *cdev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (!rte_cryptodev_is_valid_dev(cdev_id))
		return -EINVAL;

	dev = &rte_eventdevs[dev_id];
	cdev = rte_cryptodev_pmd_get_dev(cdev_id);

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->crypto_adapter_caps_get == NULL)
		*caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
	else
		*caps = 0;

	return dev->dev_ops->crypto_adapter_caps_get ?
		(*dev->dev_ops->crypto_adapter_caps_get)
		(dev, cdev, caps) : 0;
}

int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				  uint32_t *caps)
{
	struct rte_eventdev *dev;
	struct rte_eth_dev *eth_dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];
	eth_dev = &rte_eth_devices[eth_port_id];

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
	else
		*caps = 0;

	return dev->dev_ops->eth_tx_adapter_caps_get ?
			(*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
								 eth_dev,
								 caps)
			: 0;
}

static inline int
event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	struct rte_event_queue_conf *queues_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			   dev->data->dev_id);

	if (nb_queues != 0) {
		queues_cfg = dev->data->queues_cfg;
		if (*dev->dev_ops->queue_release == NULL)
			return -ENOTSUP;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_cfg + old_nb_queues, 0,
				sizeof(queues_cfg[0]) * new_qs);
		}
	} else {
		if (*dev->dev_ops->queue_release == NULL)
			return -ENOTSUP;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}

/* Marks an entry of links_map[] whose queue is not linked to the port. */
#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)

static inline int
event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint16_t *links_map;
	struct rte_event_port_conf *ports_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			   dev->data->dev_id);

	if (nb_ports != 0) { /* re-config */
		if (*dev->dev_ops->port_release == NULL)
			return -ENOTSUP;

		ports = dev->data->ports;
		ports_cfg = dev->data->ports_cfg;
		links_map = dev->data->links_map;

		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;
			unsigned int old_links_map_end =
				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
			unsigned int links_map_end =
				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_cfg + old_nb_ports, 0,
				sizeof(ports_cfg[0]) * new_ps);
			for (i = old_links_map_end; i < links_map_end; i++)
				links_map[i] =
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
		}
	} else {
		if (*dev->dev_ops->port_release == NULL)
			return -ENOTSUP;

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++) {
			(*dev->dev_ops->port_release)(ports[i]);
			ports[i] = NULL;
		}
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}

int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_event_dev_info info;
	struct rte_eventdev *dev;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns &&
		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				info.max_dequeue_timeout_ns)) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues +
			info.max_single_link_event_port_queue_pairs) {
		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
				 dev_id, dev_conf->nb_event_queues,
				 info.max_event_queues,
				 info.max_single_link_event_port_queue_pairs);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues -
			dev_conf->nb_single_link_event_port_queues >
			info.max_event_queues) {
		RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
				 dev_id, dev_conf->nb_event_queues,
				 dev_conf->nb_single_link_event_port_queues,
				 info.max_event_queues);
		return -EINVAL;
	}
	if (dev_conf->nb_single_link_event_port_queues >
			dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
				 dev_id,
				 dev_conf->nb_single_link_event_port_queues,
				 dev_conf->nb_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports +
			info.max_single_link_event_port_queue_pairs) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
				 dev_id, dev_conf->nb_event_ports,
				 info.max_event_ports,
				 info.max_single_link_event_port_queue_pairs);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports -
			dev_conf->nb_single_link_event_port_queues
			> info.max_event_ports) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
				 dev_id, dev_conf->nb_event_ports,
				 dev_conf->nb_single_link_event_port_queues,
				 info.max_event_ports);
		return -EINVAL;
	}

	if (dev_conf->nb_single_link_event_port_queues >
	    dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR(
			"dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
			dev_id,
			dev_conf->nb_single_link_event_port_queues,
			dev_conf->nb_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_dequeue_depth >
			info.max_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_enqueue_depth >
			info.max_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
				 diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
				 diag);
		return diag;
	}

	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
		event_dev_queue_config(dev, 0);
		event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	rte_eventdev_trace_configure(dev_id, dev_conf, diag);
	return diag;
}

static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->queue_def_conf == NULL)
		return -ENOTSUP;
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}

static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ATOMIC)
		))
		return 1;
	else
		return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ORDERED)
		))
		return 1;
	else
		return 0;
}

int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
			"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
			"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->queue_setup == NULL)
		return -ENOTSUP;

	if (queue_conf == NULL) {
		if (*dev->dev_ops->queue_def_conf == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	dev->data->queues_cfg[queue_id] = *queue_conf;
	rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}

static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->port_def_conf == NULL)
		return -ENOTSUP;
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (port_conf &&
	    (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
	    !(dev->data->event_dev_cap &
	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Implicit release disable not supported",
			dev_id, port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->port_setup == NULL)
		return -ENOTSUP;

	if (port_conf == NULL) {
		if (*dev->dev_ops->port_def_conf == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_cfg[port_id] = *port_conf;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port (default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
	if (diag < 0)
		return diag;

	return 0;
}

void
rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
		       rte_eventdev_port_flush_t release_cb, void *args)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return;
	}

	if (dev->dev_ops->port_quiesce)
		(*dev->dev_ops->port_quiesce)(dev, dev->data->ports[port_id],
					      release_cb, args);
}

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	switch (attr_id) {
	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
		*attr_value = dev->data->nb_ports;
		break;
	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
		*attr_value = dev->data->nb_queues;
		break;
	case RTE_EVENT_DEV_ATTR_STARTED:
		*attr_value = dev->data->dev_started;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	switch (attr_id) {
	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
		break;
	case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
	{
		uint32_t config;

		config = dev->data->ports_cfg[port_id].event_port_cfg;
		*attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			 uint32_t *attr_value)
{
	struct rte_event_queue_conf *conf;
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	conf = &dev->data->queues_cfg[queue_id];
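	/* Attributes below are served from the queue configuration cached at
	 * setup time; WEIGHT and AFFINITY additionally query the driver via
	 * queue_attr_get when that op is implemented.
	 */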

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->priority;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
		*attr_value = conf->nb_atomic_flows;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
		*attr_value = conf->nb_atomic_order_sequences;
		break;
	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
		*attr_value = conf->event_queue_cfg;
		break;
	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
			return -EOVERFLOW;

		*attr_value = conf->schedule_type;
		break;
	case RTE_EVENT_QUEUE_ATTR_WEIGHT:
		*attr_value = RTE_EVENT_QUEUE_WEIGHT_LOWEST;
		if (dev->dev_ops->queue_attr_get)
			return (*dev->dev_ops->queue_attr_get)(
				dev, queue_id, attr_id, attr_value);
		break;
	case RTE_EVENT_QUEUE_ATTR_AFFINITY:
		*attr_value = RTE_EVENT_QUEUE_AFFINITY_LOWEST;
		if (dev->dev_ops->queue_attr_get)
			return (*dev->dev_ops->queue_attr_get)(
				dev, queue_id, attr_id, attr_value);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int
rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			 uint64_t attr_value)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	if (!(dev->data->event_dev_cap &
	      RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR)) {
		RTE_EDEV_LOG_ERR(
			"Device %" PRIu8 " does not support changing queue attributes at runtime",
			dev_id);
		return -ENOTSUP;
	}

	if (*dev->dev_ops->queue_attr_set == NULL)
		return -ENOTSUP;
	return (*dev->dev_ops->queue_attr_set)(dev, queue_id, attr_id,
					       attr_value);
}

int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct rte_eventdev *dev;
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_link == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = EINVAL;
		return 0;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;

		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
					  queues, priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
	return diag;
}

int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	int i, diag, j;
	uint16_t *links_map;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_unlink == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = EINVAL;
		return 0;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);

	if (queues == NULL) {
		j = 0;
		for (i = 0; i < dev->data->nb_queues; i++) {
			if (links_map[i] !=
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
				all_queues[j] = i;
				j++;
			}
		}
		queues = all_queues;
	} else {
		for (j = 0; j < nb_unlinks; j++) {
			if (links_map[queues[j]] ==
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
				break;
		}
	}

	nb_unlinks = j;
	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
					    queues, nb_unlinks);

	if (diag < 0)
		return diag;

	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
	return diag;
}

int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Return 0 if the PMD does not implement unlinks in progress.
	 * This allows PMDs which handle unlink synchronously to not implement
	 * this function at all.
	 */
	if (*dev->dev_ops->port_unlinks_in_progress == NULL)
		return 0;

	return (*dev->dev_ops->port_unlinks_in_progress)(dev,
			dev->data->ports[port_id]);
}

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}
	return count;
}

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->timeout_ticks == NULL)
		return -ENOTSUP;

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}

int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (service_id == NULL)
		return -EINVAL;

	if (dev->data->service_inited)
		*service_id = dev->data->service_id;

	return dev->data->service_inited ? 0 : -ESRCH;
}

int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->dump == NULL)
		return -ENOTSUP;
	if (f == NULL)
		return -EINVAL;

	(*dev->dev_ops->dump)(dev, f);
	return 0;
}

static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		 uint8_t queue_port_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
							 queue_port_id,
							 NULL, NULL, 0);
	return 0;
}

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							  queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
	    (int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
				queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}

/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
			 uint8_t queue_port_id, const unsigned int ids[],
			 uint64_t values[], unsigned int n)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
						   ids, values, n);
	return -ENOTSUP;
}

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
				 unsigned int *id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	unsigned int temp = -1;

	if (id != NULL)
		*id = (unsigned int)-1;
	else
		id = &temp; /* ensure driver never gets a NULL value */

	/* implemented by driver */
	if (dev->dev_ops->xstats_get_by_name != NULL)
		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
	return -ENOTSUP;
}

int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_reset != NULL)
		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
						     ids, nb_ids);
	return -ENOTSUP;
}

int rte_event_pmd_selftest_seqn_dynfield_offset = -1;

int rte_event_dev_selftest(uint8_t dev_id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
		.name = "rte_event_pmd_selftest_seqn_dynfield",
		.size = sizeof(rte_event_pmd_selftest_seqn_t),
		.align = __alignof__(rte_event_pmd_selftest_seqn_t),
	};
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->dev_selftest != NULL) {
		rte_event_pmd_selftest_seqn_dynfield_offset =
			rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
		if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
			return -ENOMEM;
		return (*dev->dev_ops->dev_selftest)();
	}
	return -ENOTSUP;
}

struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
			     unsigned int cache_size, uint16_t nb_elem,
			     int socket_id)
{
	const char *mp_ops_name;
	struct rte_mempool *mp;
	unsigned int elt_sz;
	int ret;

	if (!nb_elem) {
		RTE_LOG(ERR, EVENTDEV,
			"Invalid number of elements=%d requested\n", nb_elem);
		rte_errno = EINVAL;
		return NULL;
	}

	elt_sz =
		sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
	mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
				      0);
	if (mp == NULL)
		return NULL;

	mp_ops_name = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
		goto err;
	}

	ret = rte_mempool_populate_default(mp);
	if (ret < 0)
		goto err;

	return mp;
err:
	rte_mempool_free(mp);
	rte_errno = -ret;
	return NULL;
}

int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
				 dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	rte_eventdev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);

	return 0;
}

int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
		eventdev_stop_flush_t callback, void *userdata)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	dev->dev_ops->dev_stop_flush = callback;
	dev->data->dev_stop_flush_arg = userdata;

	return 0;
}

void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->dev_stop == NULL)
		return;

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
				 dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
	rte_eventdev_trace_stop(dev_id);
	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
}

int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				 dev_id);
		return -EBUSY;
	}

	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
	rte_eventdev_trace_close(dev_id);
	return (*dev->dev_ops->dev_close)(dev);
}

static inline int
eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		    int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		memset(*data, 0, sizeof(struct rte_eventdev_data));
		for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
					RTE_EVENT_MAX_QUEUES_PER_DEV;
		     n++)
			(*data)->links_map[n] =
				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
	}

	return 0;
}

static inline uint8_t
eventdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}

struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already "
				 "allocated!", name);
		return NULL;
	}

	dev_id = eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval =
			eventdev_data_alloc(dev_id, &eventdev_data, socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {

			strlcpy(eventdev->data->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN);

			eventdev->data->dev_id = dev_id;
			eventdev->data->socket_id = socket_id;
			eventdev->data->dev_started = 0;
		}

		eventdev->attached = RTE_EVENTDEV_ATTACHED;
		eventdev_globals.nb_devs++;
	}

	return eventdev;
}

int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
			       eventdev->data->dev_id);
"rte_eventdev_data_%u", 1503 eventdev->data->dev_id); 1504 if (ret >= (int)sizeof(mz_name)) 1505 return -EINVAL; 1506 1507 mz = rte_memzone_lookup(mz_name); 1508 if (mz == NULL) 1509 return -ENOMEM; 1510 1511 ret = rte_memzone_free(mz); 1512 if (ret) 1513 return ret; 1514 } 1515 1516 eventdev->data = NULL; 1517 return 0; 1518 } 1519 1520 void 1521 event_dev_probing_finish(struct rte_eventdev *eventdev) 1522 { 1523 if (eventdev == NULL) 1524 return; 1525 1526 event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id, 1527 eventdev); 1528 } 1529 1530 static int 1531 handle_dev_list(const char *cmd __rte_unused, 1532 const char *params __rte_unused, 1533 struct rte_tel_data *d) 1534 { 1535 uint8_t dev_id; 1536 int ndev = rte_event_dev_count(); 1537 1538 if (ndev < 1) 1539 return -1; 1540 1541 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 1542 for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) { 1543 if (rte_eventdevs[dev_id].attached == 1544 RTE_EVENTDEV_ATTACHED) 1545 rte_tel_data_add_array_int(d, dev_id); 1546 } 1547 1548 return 0; 1549 } 1550 1551 static int 1552 handle_port_list(const char *cmd __rte_unused, 1553 const char *params, 1554 struct rte_tel_data *d) 1555 { 1556 int i; 1557 uint8_t dev_id; 1558 struct rte_eventdev *dev; 1559 char *end_param; 1560 1561 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 1562 return -1; 1563 1564 dev_id = strtoul(params, &end_param, 10); 1565 if (*end_param != '\0') 1566 RTE_EDEV_LOG_DEBUG( 1567 "Extra parameters passed to eventdev telemetry command, ignoring"); 1568 1569 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1570 dev = &rte_eventdevs[dev_id]; 1571 1572 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 1573 for (i = 0; i < dev->data->nb_ports; i++) 1574 rte_tel_data_add_array_int(d, i); 1575 1576 return 0; 1577 } 1578 1579 static int 1580 handle_queue_list(const char *cmd __rte_unused, 1581 const char *params, 1582 struct rte_tel_data *d) 1583 { 1584 int i; 1585 uint8_t dev_id; 1586 struct rte_eventdev *dev; 1587 char *end_param; 1588 1589 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 1590 return -1; 1591 1592 dev_id = strtoul(params, &end_param, 10); 1593 if (*end_param != '\0') 1594 RTE_EDEV_LOG_DEBUG( 1595 "Extra parameters passed to eventdev telemetry command, ignoring"); 1596 1597 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1598 dev = &rte_eventdevs[dev_id]; 1599 1600 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 1601 for (i = 0; i < dev->data->nb_queues; i++) 1602 rte_tel_data_add_array_int(d, i); 1603 1604 return 0; 1605 } 1606 1607 static int 1608 handle_queue_links(const char *cmd __rte_unused, 1609 const char *params, 1610 struct rte_tel_data *d) 1611 { 1612 int i, ret, port_id = 0; 1613 char *end_param; 1614 uint8_t dev_id; 1615 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV]; 1616 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV]; 1617 const char *p_param; 1618 1619 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 1620 return -1; 1621 1622 /* Get dev ID from parameter string */ 1623 dev_id = strtoul(params, &end_param, 10); 1624 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1625 1626 p_param = strtok(end_param, ","); 1627 if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param)) 1628 return -1; 1629 1630 port_id = strtoul(p_param, &end_param, 10); 1631 p_param = strtok(NULL, "\0"); 1632 if (p_param != NULL) 1633 RTE_EDEV_LOG_DEBUG( 1634 "Extra parameters passed to eventdev telemetry command, ignoring"); 1635 1636 ret = 
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	for (i = 0; i < ret; i++) {
		char qid_name[32];

		snprintf(qid_name, 31, "qid_%u", queues[i]);
		rte_tel_data_add_dict_u64(d, qid_name, priorities[i]);
	}

	return 0;
}

static int
eventdev_build_telemetry_data(int dev_id,
			      enum rte_event_dev_xstats_mode mode,
			      int port_queue_id,
			      struct rte_tel_data *d)
{
	struct rte_event_dev_xstats_name *xstat_names;
	unsigned int *ids;
	uint64_t *values;
	int i, ret, num_xstats;

	num_xstats = rte_event_dev_xstats_names_get(dev_id,
						    mode,
						    port_queue_id,
						    NULL,
						    NULL,
						    0);

	if (num_xstats < 0)
		return -1;

	/* use one malloc for names */
	xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
			     * num_xstats);
	if (xstat_names == NULL)
		return -1;

	ids = malloc((sizeof(unsigned int)) * num_xstats);
	if (ids == NULL) {
		free(xstat_names);
		return -1;
	}

	values = malloc((sizeof(uint64_t)) * num_xstats);
	if (values == NULL) {
		free(xstat_names);
		free(ids);
		return -1;
	}

	ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
					     xstat_names, ids, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(xstat_names);
		free(ids);
		free(values);
		return -1;
	}

	ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
				       ids, values, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(xstat_names);
		free(ids);
		free(values);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
					  values[i]);

	free(xstat_names);
	free(ids);
	free(values);
	return 0;
}

static int
handle_dev_xstats(const char *cmd __rte_unused,
		  const char *params,
		  struct rte_tel_data *d)
{
	int dev_id;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	mode = RTE_EVENT_DEV_XSTATS_DEVICE;
	return eventdev_build_telemetry_data(dev_id, mode, 0, d);
}

static int
handle_port_xstats(const char *cmd __rte_unused,
		   const char *params,
		   struct rte_tel_data *d)
{
	int dev_id;
	int port_queue_id = 0;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	mode = RTE_EVENT_DEV_XSTATS_PORT;

	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_queue_id = strtoul(p_param, &end_param, 10);

	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}

static int
handle_queue_xstats(const char *cmd __rte_unused,
		    const char *params,
		    struct rte_tel_data *d)
{
	int dev_id;
	int port_queue_id = 0;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	mode = RTE_EVENT_DEV_XSTATS_QUEUE;

	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_queue_id = strtoul(p_param, &end_param, 10);

	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}

RTE_INIT(eventdev_init_telemetry)
{
	rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
			"Returns list of available eventdevs. Takes no parameters");
	rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
			"Returns list of available ports. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
			"Returns list of available queues. Parameter: DevID");

	rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
			"Returns stats for an eventdev. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
			"Returns stats for an eventdev port. Params: DevID,PortID");
	rte_telemetry_register_cmd("/eventdev/queue_xstats",
			handle_queue_xstats,
			"Returns stats for an eventdev queue. Params: DevID,QueueID");
	rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
			"Returns links for an eventdev port. Params: DevID,PortID");
}