1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2016 Cavium, Inc. 3 * Copyright(c) 2016-2018 Intel Corporation. 4 * Copyright 2016 NXP 5 * All rights reserved. 6 */ 7 8 #ifndef _RTE_EVENTDEV_H_ 9 #define _RTE_EVENTDEV_H_ 10 11 /** 12 * @file 13 * 14 * RTE Event Device API 15 * ==================== 16 * 17 * In a traditional DPDK application model, the application polls Ethdev port RX 18 * queues to look for work, and processing is done in a run-to-completion manner, 19 * after which the packets are transmitted on a Ethdev TX queue. Load is 20 * distributed by statically assigning ports and queues to lcores, and NIC 21 * receive-side scaling (RSS), or similar, is employed to distribute network flows 22 * (and thus work) on the same port across multiple RX queues. 23 * 24 * In contrast, in an event-driven model, as supported by this "eventdev" library, 25 * incoming packets (or other input events) are fed into an event device, which 26 * schedules those packets across the available lcores, in accordance with its configuration. 27 * This event-driven programming model offers applications automatic multicore scaling, 28 * dynamic load balancing, pipelining, packet order maintenance, synchronization, 29 * and prioritization/quality of service. 30 * 31 * The Event Device API is composed of two parts: 32 * 33 * - The application-oriented Event API that includes functions to setup 34 * an event device (configure it, setup its queues, ports and start it), to 35 * establish the links between queues and ports to receive events, and so on. 36 * 37 * - The driver-oriented Event API that exports a function allowing 38 * an event poll Mode Driver (PMD) to register itself as 39 * an event device driver. 40 * 41 * Application-oriented Event API 42 * ------------------------------ 43 * 44 * Event device components: 45 * 46 * +-----------------+ 47 * | +-------------+ | 48 * +-------+ | | flow 0 | | 49 * |Packet | | +-------------+ | 50 * |event | | +-------------+ | 51 * | | | | flow 1 | |port_link(port0, queue0) 52 * +-------+ | +-------------+ | | +--------+ 53 * +-------+ | +-------------+ o-----v-----o |dequeue +------+ 54 * |Crypto | | | flow n | | | event +------->|Core 0| 55 * |work | | +-------------+ o----+ | port 0 | | | 56 * |done ev| | event queue 0 | | +--------+ +------+ 57 * +-------+ +-----------------+ | 58 * +-------+ | 59 * |Timer | +-----------------+ | +--------+ 60 * |expiry | | +-------------+ | +------o |dequeue +------+ 61 * |event | | | flow 0 | o-----------o event +------->|Core 1| 62 * +-------+ | +-------------+ | +----o port 1 | | | 63 * Event enqueue | +-------------+ | | +--------+ +------+ 64 * o-------------> | | flow 1 | | | 65 * enqueue( | +-------------+ | | 66 * queue_id, | | | +--------+ +------+ 67 * flow_id, | +-------------+ | | | |dequeue |Core 2| 68 * sched_type, | | flow n | o-----------o event +------->| | 69 * event_type, | +-------------+ | | | port 2 | +------+ 70 * subev_type, | event queue 1 | | +--------+ 71 * event) +-----------------+ | +--------+ 72 * | | |dequeue +------+ 73 * +-------+ +-----------------+ | | event +------->|Core n| 74 * |Core | | +-------------+ o-----------o port n | | | 75 * |(SW) | | | flow 0 | | | +--------+ +--+---+ 76 * |event | | +-------------+ | | | 77 * +-------+ | +-------------+ | | | 78 * ^ | | flow 1 | | | | 79 * | | +-------------+ o------+ | 80 * | | +-------------+ | | 81 * | | | flow n | | | 82 * | | +-------------+ | | 83 * | | event queue n | | 84 * | +-----------------+ | 85 * | | 86 * 
+-----------------------------------------------------------+ 87 * 88 * **Event device**: A hardware or software-based event scheduler. 89 * 90 * **Event**: Represents an item of work and is the smallest unit of scheduling. 91 * An event carries metadata, such as queue ID, scheduling type, and event priority, 92 * and data such as one or more packets or other kinds of buffers. 93 * Some examples of events are: 94 * - a software-generated item of work originating from a lcore, 95 * perhaps carrying a packet to be processed. 96 * - a crypto work completion notification. 97 * - a timer expiry notification. 98 * 99 * **Event queue**: A queue containing events that are to be scheduled by the event device. 100 * An event queue contains events of different flows associated with scheduling 101 * types, such as atomic, ordered, or parallel. 102 * Each event given to an event device must have a valid event queue id field in the metadata, 103 * to specify on which event queue in the device the event must be placed, 104 * for later scheduling. 105 * 106 * **Event port**: An application's interface into the event dev for enqueue and 107 * dequeue operations. Each event port can be linked with one or more 108 * event queues for dequeue operations. 109 * Enqueue and dequeue from a port is not thread-safe, and the expected use-case is 110 * that each port is polled by only a single lcore. [If this is not the case, 111 * a suitable synchronization mechanism should be used to prevent simultaneous 112 * access from multiple lcores.] 113 * To schedule events to an lcore, the event device will schedule them to the event port(s) 114 * being polled by that lcore. 115 * 116 * *NOTE*: By default, all the functions of the Event Device API exported by a PMD 117 * are non-thread-safe functions, which must not be invoked on the same object in parallel on 118 * different logical cores. 119 * For instance, the dequeue function of a PMD cannot be invoked in parallel on two logical 120 * cores to operate on same event port. Of course, this function 121 * can be invoked in parallel by different logical cores on different ports. 122 * It is the responsibility of the upper level application to enforce this rule. 123 * 124 * In all functions of the Event API, the Event device is 125 * designated by an integer >= 0 named the device identifier *dev_id* 126 * 127 * The functions exported by the application Event API to setup a device 128 * must be invoked in the following order: 129 * - rte_event_dev_configure() 130 * - rte_event_queue_setup() 131 * - rte_event_port_setup() 132 * - rte_event_port_link() 133 * - rte_event_dev_start() 134 * 135 * Then, the application can invoke, in any order, the functions 136 * exported by the Event API to dequeue events, enqueue events, 137 * and link and unlink event queue(s) to event ports. 138 * 139 * Before configuring a device, an application should call rte_event_dev_info_get() 140 * to determine the capabilities of the event device, and any queue or port 141 * limits of that device. The parameters set in the various device configuration 142 * structures may need to be adjusted based on the max values provided in the 143 * device information structure returned from the rte_event_dev_info_get() API. 144 * An application may use rte_event_queue_default_conf_get() or 145 * rte_event_port_default_conf_get() to get the default configuration 146 * to set up an event queue or event port by overriding few default values. 147 * 148 * If the application wants to change the configuration (i.e. 
call 149 * rte_event_dev_configure(), rte_event_queue_setup(), or 150 * rte_event_port_setup()), it must call rte_event_dev_stop() first to stop the 151 * device and then do the reconfiguration before calling rte_event_dev_start() 152 * again. The schedule, enqueue and dequeue functions should not be invoked 153 * when the device is stopped. 154 * 155 * Finally, an application can close an Event device by invoking the 156 * rte_event_dev_close() function. Once closed, a device cannot be 157 * reconfigured or restarted. 158 * 159 * Driver-Oriented Event API 160 * ------------------------- 161 * 162 * At the Event driver level, Event devices are represented by a generic 163 * data structure of type *rte_event_dev*. 164 * 165 * Event devices are dynamically registered during the PCI/SoC device probing 166 * phase performed at EAL initialization time. 167 * When an Event device is being probed, an *rte_event_dev* structure is allocated 168 * for it and the event_dev_init() function supplied by the Event driver 169 * is invoked to properly initialize the device. 170 * 171 * The role of the device init function is to reset the device hardware or 172 * to initialize the software event driver implementation. 173 * 174 * If the device init operation is successful, the device is assigned a device 175 * id (dev_id) for application use. 176 * Otherwise, the *rte_event_dev* structure is freed. 177 * 178 * Each function of the application Event API invokes a specific function 179 * of the PMD that controls the target device designated by its device 180 * identifier. 181 * 182 * For this purpose, all device-specific functions of an Event driver are 183 * supplied through a set of pointers contained in a generic structure of type 184 * *event_dev_ops*. 185 * The address of the *event_dev_ops* structure is stored in the *rte_event_dev* 186 * structure by the device init function of the Event driver, which is 187 * invoked during the PCI/SoC device probing phase, as explained earlier. 188 * 189 * In other words, each function of the Event API simply retrieves the 190 * *rte_event_dev* structure associated with the device identifier and 191 * performs an indirect invocation of the corresponding driver function 192 * supplied in the *event_dev_ops* structure of the *rte_event_dev* structure. 193 * 194 * For performance reasons, the addresses of the fast-path functions of the 195 * event driver are not contained in the *event_dev_ops* structure. 196 * Instead, they are directly stored at the beginning of the *rte_event_dev* 197 * structure to avoid an extra indirect memory access during their invocation. 198 * 199 * Event Enqueue, Dequeue and Scheduling 200 * ------------------------------------- 201 * 202 * RTE event device drivers do not use interrupts for enqueue or dequeue 203 * operation. Instead, Event drivers export Poll-Mode enqueue and dequeue 204 * functions to applications. 205 * 206 * The events are injected to event device through *enqueue* operation by 207 * event producers in the system. The typical event producers are ethdev 208 * subsystem for generating packet events, CPU(SW) for generating events based 209 * on different stages of application processing, cryptodev for generating 210 * crypto work completion notification etc 211 * 212 * The *dequeue* operation gets one or more events from the event ports. 213 * The application processes the events and sends them to a downstream event queue through 214 * rte_event_enqueue_burst(), if it is an intermediate stage of event processing. 
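 * For example, an intermediate pipeline stage may forward a dequeued event to
 * its next stage as in the sketch below (the queue and port ids used are
 * illustrative, not part of this API):
 * \code{.c}
 *	ev.queue_id = next_stage_queue_id;
 *	ev.op = RTE_EVENT_OP_FORWARD;
 *	rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 * \endcode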
215 * On the final stage of processing, the application may use the Tx adapter API for maintaining 216 * the event ingress order while sending the packet/event on the wire via NIC Tx. 217 * 218 * The point at which events are scheduled to ports depends on the device. 219 * For hardware devices, scheduling occurs asynchronously without any software 220 * intervention. Software schedulers can either be distributed 221 * (each worker thread schedules events to its own port) or centralized 222 * (a dedicated thread schedules to all ports). Distributed software schedulers 223 * perform the scheduling inside the enqueue or dequeue functions, whereas centralized 224 * software schedulers need a dedicated service core for scheduling. 225 * The absence of the RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag 226 * indicates that the device is centralized and thus needs a dedicated scheduling 227 * thread (generally an RTE service that should be mapped to one or more service cores) 228 * that repeatedly calls the software specific scheduling function. 229 * 230 * An event driven worker thread has following typical workflow on fastpath: 231 * \code{.c} 232 * while (1) { 233 * rte_event_dequeue_burst(...); 234 * (event processing) 235 * rte_event_enqueue_burst(...); 236 * } 237 * \endcode 238 */ 239 240 #ifdef __cplusplus 241 extern "C" { 242 #endif 243 244 #include <rte_compat.h> 245 #include <rte_common.h> 246 #include <rte_errno.h> 247 #include <rte_mbuf_pool_ops.h> 248 #include <rte_mempool.h> 249 250 #include "rte_eventdev_trace_fp.h" 251 252 struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */ 253 struct rte_event; 254 255 /* Event device capability bitmap flags */ 256 #define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0) 257 /**< Event scheduling prioritization is based on the priority and weight 258 * associated with each event queue. 259 * 260 * Events from a queue with highest priority 261 * are scheduled first. If the queues are of same priority, weight of the queues 262 * are considered to select a queue in a weighted round robin fashion. 263 * Subsequent dequeue calls from an event port could see events from the same 264 * event queue, if the queue is configured with an affinity count. Affinity 265 * count is the number of subsequent dequeue calls, in which an event port 266 * should use the same event queue if the queue is non-empty 267 * 268 * NOTE: A device may use both queue prioritization and event prioritization 269 * (@ref RTE_EVENT_DEV_CAP_EVENT_QOS capability) when making packet scheduling decisions. 270 * 271 * @see rte_event_queue_setup() 272 * @see rte_event_queue_attr_set() 273 */ 274 #define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1) 275 /**< Event scheduling prioritization is based on the priority associated with 276 * each event. 277 * 278 * Priority of each event is supplied in *rte_event* structure 279 * on each enqueue operation. 280 * If this capability is not set, the priority field of the event structure 281 * is ignored for each event. 282 * 283 * NOTE: A device may use both queue prioritization (@ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability) 284 * and event prioritization when making packet scheduling decisions. 285 286 * @see rte_event_enqueue_burst() 287 */ 288 #define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2) 289 /**< Event device operates in distributed scheduling mode. 290 * 291 * In distributed scheduling mode, event scheduling happens in HW or 292 * rte_event_dequeue_burst() / rte_event_enqueue_burst() or the combination of these two. 
 * If the flag is not set, then the eventdev is centralized and thus needs a
 * dedicated service core that acts as a scheduling thread.
 *
 * @see rte_event_dev_service_id_get()
 */
#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
/**< Event device is capable of accepting enqueued events, of any type
 * advertised as supported by the device, to all destination queues.
 *
 * When this capability is set, and @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag is set
 * in @ref rte_event_queue_conf.event_queue_cfg, the "schedule_type" field of the
 * @ref rte_event_queue_conf structure is ignored when a queue is being configured.
 * Instead the "sched_type" field of each event enqueued is used to
 * select the scheduling to be performed on that event.
 *
 * If this capability is not set, or the configuration flag is not set,
 * the queue only supports events of the *RTE_SCHED_TYPE_* type specified
 * in the @ref rte_event_queue_conf structure at time of configuration.
 * The behaviour when events of other scheduling types are sent to the queue is
 * undefined.
 *
 * @see RTE_EVENT_QUEUE_CFG_ALL_TYPES
 * @see RTE_SCHED_TYPE_ATOMIC
 * @see RTE_SCHED_TYPE_ORDERED
 * @see RTE_SCHED_TYPE_PARALLEL
 * @see rte_event_queue_conf.event_queue_cfg
 * @see rte_event_queue_conf.schedule_type
 * @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
/**< Event device is capable of operating in burst mode for enqueue (forward,
 * release) and dequeue operations.
 *
 * If this capability is not set, the application can still use
 * rte_event_dequeue_burst() and rte_event_enqueue_burst(), but the PMD
 * accepts or returns only one event at a time.
 *
 * @see rte_event_dequeue_burst()
 * @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5)
/**< Event device ports support disabling the implicit release feature, in
 * which the port will release all unreleased events in its dequeue operation.
 *
 * If this capability is set and the port is configured with implicit release
 * disabled, the application is responsible for explicitly releasing events
 * using either the @ref RTE_EVENT_OP_FORWARD or the @ref RTE_EVENT_OP_RELEASE event
 * enqueue operations.
 *
 * @see rte_event_dequeue_burst()
 * @see rte_event_enqueue_burst()
 */

#define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6)
/**< Event device is capable of operating in non-sequential mode.
 *
 * The path of the event need not be sequential. The application can change
 * the path of an event at runtime, and events may be sent to queues in any order.
 *
 * If the flag is not set, then each event must follow a path from queue 0
 * to queue 1 to queue 2, etc.
 * The eventdev will return an error when the application enqueues an event for a
 * queue id which is not the next in the sequence.
 */

#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7)
/**< Event device is capable of reconfiguring the queue/port link at runtime.
 *
 * If the flag is not set, the eventdev queue/port link can only be
 * configured during initialization, or by stopping the device and
 * then restarting it after reconfiguration.
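 *
 * When the flag is set, links may be changed while the device is running,
 * for example (a sketch; the queue and port ids are illustrative):
 * \code{.c}
 *	uint8_t q = 1;
 *
 *	rte_event_port_unlink(dev_id, port_id, NULL, 0);   // unlink all queues
 *	rte_event_port_link(dev_id, port_id, &q, NULL, 1); // link queue 1, default priority
 * \endcode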
364 * 365 * @see rte_event_port_link() 366 * @see rte_event_port_unlink() 367 */ 368 369 #define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8) 370 /**< Event device is capable of setting up links between multiple queues and a single port. 371 * 372 * If the flag is not set, each port may only be linked to a single queue, and 373 * so can only receive events from that queue. 374 * However, each queue may be linked to multiple ports. 375 * 376 * @see rte_event_port_link() 377 */ 378 379 #define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9) 380 /**< Event device preserves the flow ID from the enqueued event to the dequeued event. 381 * 382 * If this flag is not set, 383 * the content of the flow-id field in dequeued events is implementation dependent. 384 * 385 * @see rte_event_dequeue_burst() 386 */ 387 388 #define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10) 389 /**< Event device *does not* require calls to rte_event_maintain(). 390 * 391 * An event device that does not set this flag requires calls to 392 * rte_event_maintain() during periods when neither 393 * rte_event_dequeue_burst() nor rte_event_enqueue_burst() are called 394 * on a port. This will allow the event device to perform internal 395 * processing, such as flushing buffered events, return credits to a 396 * global pool, or process signaling related to load balancing. 397 * 398 * @see rte_event_maintain() 399 */ 400 401 #define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11) 402 /**< Event device is capable of changing the queue attributes at runtime i.e 403 * after rte_event_queue_setup() or rte_event_dev_start() call sequence. 404 * 405 * If this flag is not set, event queue attributes can only be configured during 406 * rte_event_queue_setup(). 407 * 408 * @see rte_event_queue_setup() 409 */ 410 411 #define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12) 412 /**< Event device is capable of supporting multiple link profiles per event port. 413 * 414 * When set, the value of `rte_event_dev_info::max_profiles_per_port` is greater 415 * than one, and multiple profiles may be configured and then switched at runtime. 416 * If not set, only a single profile may be configured, which may itself be 417 * runtime adjustable (if @ref RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK is set). 418 * 419 * @see rte_event_port_profile_links_set() 420 * @see rte_event_port_profile_links_get() 421 * @see rte_event_port_profile_switch() 422 * @see RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK 423 */ 424 425 #define RTE_EVENT_DEV_CAP_ATOMIC (1ULL << 13) 426 /**< Event device is capable of atomic scheduling. 427 * When this flag is set, the application can configure queues with scheduling type 428 * atomic on this event device. 429 * 430 * @see RTE_SCHED_TYPE_ATOMIC 431 */ 432 433 #define RTE_EVENT_DEV_CAP_ORDERED (1ULL << 14) 434 /**< Event device is capable of ordered scheduling. 435 * When this flag is set, the application can configure queues with scheduling type 436 * ordered on this event device. 437 * 438 * @see RTE_SCHED_TYPE_ORDERED 439 */ 440 441 #define RTE_EVENT_DEV_CAP_PARALLEL (1ULL << 15) 442 /**< Event device is capable of parallel scheduling. 443 * When this flag is set, the application can configure queues with scheduling type 444 * parallel on this event device. 445 * 446 * @see RTE_SCHED_TYPE_PARALLEL 447 */ 448 449 /* Event device priority levels */ 450 #define RTE_EVENT_DEV_PRIORITY_HIGHEST 0 451 /**< Highest priority level for events and queues. 
452 * 453 * @see rte_event_queue_setup() 454 * @see rte_event_enqueue_burst() 455 * @see rte_event_port_link() 456 */ 457 #define RTE_EVENT_DEV_PRIORITY_NORMAL 128 458 /**< Normal priority level for events and queues. 459 * 460 * @see rte_event_queue_setup() 461 * @see rte_event_enqueue_burst() 462 * @see rte_event_port_link() 463 */ 464 #define RTE_EVENT_DEV_PRIORITY_LOWEST 255 465 /**< Lowest priority level for events and queues. 466 * 467 * @see rte_event_queue_setup() 468 * @see rte_event_enqueue_burst() 469 * @see rte_event_port_link() 470 */ 471 472 /* Event queue scheduling weights */ 473 #define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255 474 /**< Highest weight of an event queue. 475 * 476 * @see rte_event_queue_attr_get() 477 * @see rte_event_queue_attr_set() 478 */ 479 #define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0 480 /**< Lowest weight of an event queue. 481 * 482 * @see rte_event_queue_attr_get() 483 * @see rte_event_queue_attr_set() 484 */ 485 486 /* Event queue scheduling affinity */ 487 #define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255 488 /**< Highest scheduling affinity of an event queue. 489 * 490 * @see rte_event_queue_attr_get() 491 * @see rte_event_queue_attr_set() 492 */ 493 #define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0 494 /**< Lowest scheduling affinity of an event queue. 495 * 496 * @see rte_event_queue_attr_get() 497 * @see rte_event_queue_attr_set() 498 */ 499 500 /** 501 * Get the total number of event devices that have been successfully 502 * initialised. 503 * 504 * @return 505 * The total number of usable event devices. 506 */ 507 uint8_t 508 rte_event_dev_count(void); 509 510 /** 511 * Get the device identifier for the named event device. 512 * 513 * @param name 514 * Event device name to select the event device identifier. 515 * 516 * @return 517 * Returns event device identifier on success. 518 * - <0: Failure to find named event device. 519 */ 520 int 521 rte_event_dev_get_dev_id(const char *name); 522 523 /** 524 * Return the NUMA socket to which a device is connected. 525 * 526 * @param dev_id 527 * The identifier of the device. 528 * @return 529 * The NUMA socket id to which the device is connected or 530 * a default of zero if the socket could not be determined. 531 * -(-EINVAL) dev_id value is out of range. 532 */ 533 int 534 rte_event_dev_socket_id(uint8_t dev_id); 535 536 /** 537 * Event device information 538 */ 539 struct rte_event_dev_info { 540 const char *driver_name; /**< Event driver name */ 541 struct rte_device *dev; /**< Device information */ 542 uint32_t min_dequeue_timeout_ns; 543 /**< Minimum supported global dequeue timeout(ns) by this device */ 544 uint32_t max_dequeue_timeout_ns; 545 /**< Maximum supported global dequeue timeout(ns) by this device */ 546 uint32_t dequeue_timeout_ns; 547 /**< Configured global dequeue timeout(ns) for this device */ 548 uint8_t max_event_queues; 549 /**< Maximum event_queues supported by this device */ 550 uint32_t max_event_queue_flows; 551 /**< Maximum supported flows in an event queue by this device*/ 552 uint8_t max_event_queue_priority_levels; 553 /**< Maximum number of event queue priority levels by this device. 554 * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability 555 */ 556 uint8_t max_event_priority_levels; 557 /**< Maximum number of event priority levels by this device. 
558 * Valid when the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability 559 */ 560 uint8_t max_event_ports; 561 /**< Maximum number of event ports supported by this device */ 562 uint8_t max_event_port_dequeue_depth; 563 /**< Maximum number of events can be dequeued at a time from an 564 * event port by this device. 565 * A device that does not support bulk dequeue will set this as 1. 566 */ 567 uint32_t max_event_port_enqueue_depth; 568 /**< Maximum number of events can be enqueued at a time from an 569 * event port by this device. 570 * A device that does not support bulk enqueue will set this as 1. 571 */ 572 uint8_t max_event_port_links; 573 /**< Maximum number of queues that can be linked to a single event 574 * port by this device. 575 */ 576 int32_t max_num_events; 577 /**< A *closed system* event dev has a limit on the number of events it 578 * can manage at a time. An *open system* event dev does not have a 579 * limit and will specify this as -1. 580 */ 581 uint32_t event_dev_cap; 582 /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/ 583 uint8_t max_single_link_event_port_queue_pairs; 584 /**< Maximum number of event ports and queues that are optimized for 585 * (and only capable of) single-link configurations supported by this 586 * device. These ports and queues are not accounted for in 587 * max_event_ports or max_event_queues. 588 */ 589 uint8_t max_profiles_per_port; 590 /**< Maximum number of event queue profiles per event port. 591 * A device that doesn't support multiple profiles will set this as 1. 592 */ 593 }; 594 595 /** 596 * Retrieve the contextual information of an event device. 597 * 598 * @param dev_id 599 * The identifier of the device. 600 * 601 * @param[out] dev_info 602 * A pointer to a structure of type *rte_event_dev_info* to be filled with the 603 * contextual information of the device. 604 * 605 * @return 606 * - 0: Success, driver updates the contextual information of the event device 607 * - <0: Error code returned by the driver info get function. 608 */ 609 int 610 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info); 611 612 /** 613 * The count of ports. 614 */ 615 #define RTE_EVENT_DEV_ATTR_PORT_COUNT 0 616 /** 617 * The count of queues. 618 */ 619 #define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1 620 /** 621 * The status of the device, zero for stopped, non-zero for started. 622 */ 623 #define RTE_EVENT_DEV_ATTR_STARTED 2 624 625 /** 626 * Get an attribute from a device. 627 * 628 * @param dev_id Eventdev id 629 * @param attr_id The attribute ID to retrieve 630 * @param[out] attr_value A pointer that will be filled in with the attribute 631 * value if successful. 632 * 633 * @return 634 * - 0: Successfully retrieved attribute value 635 * - -EINVAL: Invalid device or *attr_id* provided, or *attr_value* is NULL 636 */ 637 int 638 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id, 639 uint32_t *attr_value); 640 641 642 /* Event device configuration bitmap flags */ 643 #define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0) 644 /**< Override the global *dequeue_timeout_ns* and use per dequeue timeout in ns. 645 * @see rte_event_dequeue_timeout_ticks(), rte_event_dequeue_burst() 646 */ 647 648 /** Event device configuration structure */ 649 struct rte_event_dev_config { 650 uint32_t dequeue_timeout_ns; 651 /**< rte_event_dequeue_burst() timeout on this device. 
	 * This value should be in the range of *min_dequeue_timeout_ns* and
	 * *max_dequeue_timeout_ns*, which were previously provided in
	 * rte_event_dev_info_get().
	 * The value 0 is allowed, in which case the default dequeue timeout is used.
	 * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
	 */
	int32_t nb_events_limit;
	/**< In a *closed system* this field is the limit on the maximum number of
	 * events that can be in flight in the eventdev at a given time. The
	 * limit is required to ensure that the finite space in a closed system
	 * is not overwhelmed. The value cannot exceed the *max_num_events*
	 * as provided by rte_event_dev_info_get().
	 * This value should be set to -1 for an *open system*.
	 */
	uint8_t nb_event_queues;
	/**< Number of event queues to configure on this device.
	 * This value cannot exceed the *max_event_queues* which was previously
	 * provided in rte_event_dev_info_get().
	 */
	uint8_t nb_event_ports;
	/**< Number of event ports to configure on this device.
	 * This value cannot exceed the *max_event_ports* which was previously
	 * provided in rte_event_dev_info_get().
	 */
	uint32_t nb_event_queue_flows;
	/**< Number of flows for any event queue on this device.
	 * This value cannot exceed the *max_event_queue_flows* which was
	 * previously provided in rte_event_dev_info_get().
	 */
	uint32_t nb_event_port_dequeue_depth;
	/**< Maximum number of events that can be dequeued at a time from an
	 * event port by this device.
	 * This value cannot exceed the *max_event_port_dequeue_depth*
	 * which was previously provided in rte_event_dev_info_get().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 * @see rte_event_port_setup()
	 */
	uint32_t nb_event_port_enqueue_depth;
	/**< Maximum number of events that can be enqueued at a time from an
	 * event port by this device.
	 * This value cannot exceed the *max_event_port_enqueue_depth*
	 * which was previously provided in rte_event_dev_info_get().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 * @see rte_event_port_setup()
	 */
	uint32_t event_dev_cfg;
	/**< Event device config flags (RTE_EVENT_DEV_CFG_*). */
	uint8_t nb_single_link_event_port_queues;
	/**< Number of event ports and queues that will be singly-linked to
	 * each other. These are a subset of the overall event ports and
	 * queues; this value cannot exceed *nb_event_ports* or
	 * *nb_event_queues*. If the device has ports and queues that are
	 * optimized for single-link usage, this field is a hint for how many
	 * to allocate; otherwise, regular event ports and queues can be used.
	 */
};

/**
 * Configure an event device.
 *
 * This function must be invoked first, before any other function in the
 * API. This function can also be re-invoked when a device is in the
 * stopped state.
 *
 * The caller may use rte_event_dev_info_get() to get the capabilities of the
 * resources available for this event device.
 *
 * @param dev_id
 *   The identifier of the device to configure.
 * @param dev_conf
 *   The event device configuration structure.
 *
 * @return
 *   - 0: Success, device configured.
 *   - <0: Error code returned by the driver configuration function.
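 *
 * A minimal configuration sketch (the counts chosen here are illustrative,
 * not defaults):
 * \code{.c}
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config config = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	config.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	config.nb_events_limit = info.max_num_events;
 *	config.nb_event_queues = 2;
 *	config.nb_event_ports = 4;
 *	config.nb_event_queue_flows = 1024;
 *	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	rte_event_dev_configure(dev_id, &config);
 * \endcode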
 */
int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf);

/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)
/**< Allow enqueuing with the ATOMIC, ORDERED and PARALLEL schedule types.
 *
 * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
 * @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)
/**< This event queue links only to a single event port.
 *
 * @see rte_event_port_setup(), rte_event_port_link()
 */

/** Event queue configuration structure */
struct rte_event_queue_conf {
	uint32_t nb_atomic_flows;
	/**< The maximum number of active flows this queue can track at any
	 * given time. If the queue is configured for atomic scheduling (by
	 * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg
	 * or the RTE_SCHED_TYPE_ATOMIC flag to schedule_type), then the
	 * value must be in the range of [1, nb_event_queue_flows], which was
	 * previously provided in rte_event_dev_configure().
	 */
	uint32_t nb_atomic_order_sequences;
	/**< The maximum number of outstanding events waiting to be
	 * reordered by this queue. In other words, the number of entries in
	 * this queue's reorder buffer. When the number of events in the
	 * reorder buffer reaches *nb_atomic_order_sequences*, the
	 * scheduler cannot schedule the events from this queue and an invalid
	 * event will be returned from dequeue until one or more entries are
	 * freed up/released.
	 * If the queue is configured for ordered scheduling (by applying the
	 * RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg or the
	 * RTE_SCHED_TYPE_ORDERED flag to schedule_type), then the value must
	 * be in the range of [1, nb_event_queue_flows], which was
	 * previously supplied to rte_event_dev_configure().
	 */
	uint32_t event_queue_cfg;
	/**< Queue cfg flags (EVENT_QUEUE_CFG_*). */
	uint8_t schedule_type;
	/**< Queue schedule type (RTE_SCHED_TYPE_*).
	 * Valid when the RTE_EVENT_QUEUE_CFG_ALL_TYPES bit is not set in
	 * event_queue_cfg.
	 */
	uint8_t priority;
	/**< Priority for this event queue relative to other event queues.
	 * The requested priority should be in the range of
	 * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
	 * The implementation shall normalize the requested priority to an
	 * event device supported priority value.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
	 */
	uint8_t weight;
	/**< Weight of the event queue relative to other event queues.
	 * The requested weight should be in the range of
	 * [RTE_EVENT_QUEUE_WEIGHT_HIGHEST, RTE_EVENT_QUEUE_WEIGHT_LOWEST].
	 * The implementation shall normalize the requested weight to an event
	 * device supported weight value.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
	 */
	uint8_t affinity;
	/**< Affinity of the event queue relative to other event queues.
	 * The requested affinity should be in the range of
	 * [RTE_EVENT_QUEUE_AFFINITY_HIGHEST, RTE_EVENT_QUEUE_AFFINITY_LOWEST].
	 * The implementation shall normalize the requested affinity to an event
	 * device supported affinity value.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
801 */ 802 }; 803 804 /** 805 * Retrieve the default configuration information of an event queue designated 806 * by its *queue_id* from the event driver for an event device. 807 * 808 * This function intended to be used in conjunction with rte_event_queue_setup() 809 * where caller needs to set up the queue by overriding few default values. 810 * 811 * @param dev_id 812 * The identifier of the device. 813 * @param queue_id 814 * The index of the event queue to get the configuration information. 815 * The value must be in the range [0, nb_event_queues - 1] 816 * previously supplied to rte_event_dev_configure(). 817 * @param[out] queue_conf 818 * The pointer to the default event queue configuration data. 819 * @return 820 * - 0: Success, driver updates the default event queue configuration data. 821 * - <0: Error code returned by the driver info get function. 822 * 823 * @see rte_event_queue_setup() 824 */ 825 int 826 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id, 827 struct rte_event_queue_conf *queue_conf); 828 829 /** 830 * Allocate and set up an event queue for an event device. 831 * 832 * @param dev_id 833 * The identifier of the device. 834 * @param queue_id 835 * The index of the event queue to setup. The value must be in the range 836 * [0, nb_event_queues - 1] previously supplied to rte_event_dev_configure(). 837 * @param queue_conf 838 * The pointer to the configuration data to be used for the event queue. 839 * NULL value is allowed, in which case default configuration used. 840 * 841 * @see rte_event_queue_default_conf_get() 842 * 843 * @return 844 * - 0: Success, event queue correctly set up. 845 * - <0: event queue configuration failed 846 */ 847 int 848 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id, 849 const struct rte_event_queue_conf *queue_conf); 850 851 /** 852 * The priority of the queue. 853 */ 854 #define RTE_EVENT_QUEUE_ATTR_PRIORITY 0 855 /** 856 * The number of atomic flows configured for the queue. 857 */ 858 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1 859 /** 860 * The number of atomic order sequences configured for the queue. 861 */ 862 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2 863 /** 864 * The cfg flags for the queue. 865 */ 866 #define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3 867 /** 868 * The schedule type of the queue. 869 */ 870 #define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4 871 /** 872 * The weight of the queue. 873 */ 874 #define RTE_EVENT_QUEUE_ATTR_WEIGHT 5 875 /** 876 * Affinity of the queue. 877 */ 878 #define RTE_EVENT_QUEUE_ATTR_AFFINITY 6 879 880 /** 881 * Get an attribute from a queue. 882 * 883 * @param dev_id 884 * Eventdev id 885 * @param queue_id 886 * Eventdev queue id 887 * @param attr_id 888 * The attribute ID to retrieve 889 * @param[out] attr_value 890 * A pointer that will be filled in with the attribute value if successful 891 * 892 * @return 893 * - 0: Successfully returned value 894 * - -EINVAL: invalid device, queue or attr_id provided, or attr_value was 895 * NULL 896 * - -EOVERFLOW: returned when attr_id is set to 897 * RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and event_queue_cfg is set to 898 * RTE_EVENT_QUEUE_CFG_ALL_TYPES 899 */ 900 int 901 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id, 902 uint32_t *attr_value); 903 904 /** 905 * Set an event queue attribute. 
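 *
 * For example, a sketch of raising a queue's scheduling weight at runtime
 * (this requires the RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR capability; the
 * device and queue ids are illustrative):
 * \code{.c}
 *	rte_event_queue_attr_set(dev_id, queue_id, RTE_EVENT_QUEUE_ATTR_WEIGHT,
 *				 RTE_EVENT_QUEUE_WEIGHT_HIGHEST);
 * \endcode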
906 * 907 * @param dev_id 908 * Eventdev id 909 * @param queue_id 910 * Eventdev queue id 911 * @param attr_id 912 * The attribute ID to set 913 * @param attr_value 914 * The attribute value to set 915 * 916 * @return 917 * - 0: Successfully set attribute. 918 * - -EINVAL: invalid device, queue or attr_id. 919 * - -ENOTSUP: device does not support setting the event attribute. 920 * - <0: failed to set event queue attribute 921 */ 922 int 923 rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id, 924 uint64_t attr_value); 925 926 /* Event port specific APIs */ 927 928 /* Event port configuration bitmap flags */ 929 #define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL (1ULL << 0) 930 /**< Configure the port not to release outstanding events in 931 * rte_event_dev_dequeue_burst(). If set, all events received through 932 * the port must be explicitly released with RTE_EVENT_OP_RELEASE or 933 * RTE_EVENT_OP_FORWARD. Must be unset if the device is not 934 * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable. 935 */ 936 #define RTE_EVENT_PORT_CFG_SINGLE_LINK (1ULL << 1) 937 /**< This event port links only to a single event queue. 938 * 939 * @see rte_event_port_setup(), rte_event_port_link() 940 */ 941 #define RTE_EVENT_PORT_CFG_HINT_PRODUCER (1ULL << 2) 942 /**< Hint that this event port will primarily enqueue events to the system. 943 * A PMD can optimize its internal workings by assuming that this port is 944 * primarily going to enqueue NEW events. 945 * 946 * Note that this flag is only a hint, so PMDs must operate under the 947 * assumption that any port can enqueue an event with any type of op. 948 * 949 * @see rte_event_port_setup() 950 */ 951 #define RTE_EVENT_PORT_CFG_HINT_CONSUMER (1ULL << 3) 952 /**< Hint that this event port will primarily dequeue events from the system. 953 * A PMD can optimize its internal workings by assuming that this port is 954 * primarily going to consume events, and not enqueue FORWARD or RELEASE 955 * events. 956 * 957 * Note that this flag is only a hint, so PMDs must operate under the 958 * assumption that any port can enqueue an event with any type of op. 959 * 960 * @see rte_event_port_setup() 961 */ 962 #define RTE_EVENT_PORT_CFG_HINT_WORKER (1ULL << 4) 963 /**< Hint that this event port will primarily pass existing events through. 964 * A PMD can optimize its internal workings by assuming that this port is 965 * primarily going to FORWARD events, and not enqueue NEW or RELEASE events 966 * often. 967 * 968 * Note that this flag is only a hint, so PMDs must operate under the 969 * assumption that any port can enqueue an event with any type of op. 970 * 971 * @see rte_event_port_setup() 972 */ 973 974 /** Event port configuration structure */ 975 struct rte_event_port_conf { 976 int32_t new_event_threshold; 977 /**< A backpressure threshold for new event enqueues on this port. 978 * Use for *closed system* event dev where event capacity is limited, 979 * and cannot exceed the capacity of the event dev. 980 * Configuring ports with different thresholds can make higher priority 981 * traffic less likely to be backpressured. 982 * For example, a port used to inject NIC Rx packets into the event dev 983 * can have a lower threshold so as not to overwhelm the device, 984 * while ports used for worker pools can have a higher threshold. 985 * This value cannot exceed the *nb_events_limit* 986 * which was previously supplied to rte_event_dev_configure(). 987 * This should be set to '-1' for *open system*. 
	 */
	uint16_t dequeue_depth;
	/**< Configure the number of bulk dequeues for this event port.
	 * This value cannot exceed the *nb_event_port_dequeue_depth*
	 * which was previously supplied to rte_event_dev_configure().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 */
	uint16_t enqueue_depth;
	/**< Configure the number of bulk enqueues for this event port.
	 * This value cannot exceed the *nb_event_port_enqueue_depth*
	 * which was previously supplied to rte_event_dev_configure().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 */
	uint32_t event_port_cfg; /**< Port cfg flags (EVENT_PORT_CFG_*). */
};

/**
 * Retrieve the default configuration information of an event port designated
 * by its *port_id* from the event driver for an event device.
 *
 * This function is intended to be used in conjunction with rte_event_port_setup()
 * where the caller needs to set up the port by overriding a few default values.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The index of the event port to get the configuration information.
 *   The value must be in the range [0, nb_event_ports - 1]
 *   previously supplied to rte_event_dev_configure().
 * @param[out] port_conf
 *   The pointer to the default event port configuration data.
 * @return
 *   - 0: Success, driver updates the default event port configuration data.
 *   - <0: Error code returned by the driver info get function.
 *
 * @see rte_event_port_setup()
 */
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				struct rte_event_port_conf *port_conf);

/**
 * Allocate and set up an event port for an event device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The index of the event port to setup. The value must be in the range
 *   [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
 * @param port_conf
 *   The pointer to the configuration data to be used for the event port.
 *   NULL value is allowed, in which case the default configuration is used.
 *
 * @see rte_event_port_default_conf_get()
 *
 * @return
 *   - 0: Success, event port correctly set up.
 *   - <0: Port configuration failed.
 *   - (-EDQUOT) Quota exceeded (application tried to link a queue configured
 *     with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port).
 */
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf);

typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
					  struct rte_event event, void *arg);
/**< Callback function prototype that can be passed to
 * rte_event_port_quiesce(), invoked once per released event.
 */

/**
 * Quiesce any core specific resources consumed by the event port.
 *
 * Event ports are generally coupled with lcores, and a given hardware
 * implementation might require the PMD to store port specific data in the
 * lcore.
 * When the application decides to migrate the event port to another lcore
 * or tear down the current lcore, it may call rte_event_port_quiesce()
 * to make sure that all the data associated with the event port is released
 * from the lcore; this might also include any prefetched events.
1069 * While releasing the event port from the lcore, this function calls the 1070 * user-provided flush callback once per event. 1071 * 1072 * @note Invocation of this API does not affect the existing port configuration. 1073 * 1074 * @param dev_id 1075 * The identifier of the device. 1076 * @param port_id 1077 * The index of the event port to setup. The value must be in the range 1078 * [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure(). 1079 * @param release_cb 1080 * Callback function invoked once per flushed event. 1081 * @param args 1082 * Argument supplied to callback. 1083 */ 1084 void 1085 rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id, 1086 rte_eventdev_port_flush_t release_cb, void *args); 1087 1088 /** 1089 * The queue depth of the port on the enqueue side 1090 */ 1091 #define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0 1092 /** 1093 * The queue depth of the port on the dequeue side 1094 */ 1095 #define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1 1096 /** 1097 * The new event threshold of the port 1098 */ 1099 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2 1100 /** 1101 * The implicit release disable attribute of the port 1102 */ 1103 #define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3 1104 1105 /** 1106 * Get an attribute from a port. 1107 * 1108 * @param dev_id 1109 * Eventdev id 1110 * @param port_id 1111 * Eventdev port id 1112 * @param attr_id 1113 * The attribute ID to retrieve 1114 * @param[out] attr_value 1115 * A pointer that will be filled in with the attribute value if successful 1116 * 1117 * @return 1118 * - 0: Successfully returned value 1119 * - (-EINVAL) Invalid device, port or attr_id, or attr_value was NULL 1120 */ 1121 int 1122 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id, 1123 uint32_t *attr_value); 1124 1125 /** 1126 * Start an event device. 1127 * 1128 * The device start step is the last one and consists of setting the event 1129 * queues to start accepting the events and schedules to event ports. 1130 * 1131 * On success, all basic functions exported by the API (event enqueue, 1132 * event dequeue and so on) can be invoked. 1133 * 1134 * @param dev_id 1135 * Event device identifier 1136 * @return 1137 * - 0: Success, device started. 1138 * - -ESTALE : Not all ports of the device are configured 1139 * - -ENOLINK: Not all queues are linked, which could lead to deadlock. 1140 */ 1141 int 1142 rte_event_dev_start(uint8_t dev_id); 1143 1144 /** 1145 * Stop an event device. 1146 * 1147 * This function causes all queued events to be drained, including those 1148 * residing in event ports. While draining events out of the device, this 1149 * function calls the user-provided flush callback (if one was registered) once 1150 * per event. 1151 * 1152 * The device can be restarted with a call to rte_event_dev_start(). Threads 1153 * that continue to enqueue/dequeue while the device is stopped, or being 1154 * stopped, will result in undefined behavior. This includes event adapters, 1155 * which must be stopped prior to stopping the eventdev. 1156 * 1157 * @param dev_id 1158 * Event device identifier. 1159 * 1160 * @see rte_event_dev_stop_flush_callback_register() 1161 */ 1162 void 1163 rte_event_dev_stop(uint8_t dev_id); 1164 1165 typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id, 1166 struct rte_event event, void *arg); 1167 /**< Callback function called during rte_event_dev_stop(), invoked once per 1168 * flushed event. 
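 *
 * A minimal sketch of such a callback, assuming the flushed events carry
 * mbufs that should be freed (rte_pktmbuf_free() is declared in rte_mbuf.h):
 * \code{.c}
 *	static void
 *	stop_flush_cb(uint8_t dev_id, struct rte_event event, void *arg)
 *	{
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(arg);
 *		rte_pktmbuf_free(event.mbuf);
 *	}
 * \endcode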
1169 */ 1170 1171 /** 1172 * Registers a callback function to be invoked during rte_event_dev_stop() for 1173 * each flushed event. This function can be used to properly dispose of queued 1174 * events, for example events containing memory pointers. 1175 * 1176 * The callback function is only registered for the calling process. The 1177 * callback function must be registered in every process that can call 1178 * rte_event_dev_stop(). 1179 * 1180 * To unregister a callback, call this function with a NULL callback pointer. 1181 * 1182 * @param dev_id 1183 * The identifier of the device. 1184 * @param callback 1185 * Callback function invoked once per flushed event. 1186 * @param userdata 1187 * Argument supplied to callback. 1188 * 1189 * @return 1190 * - 0 on success. 1191 * - -EINVAL if *dev_id* is invalid 1192 * 1193 * @see rte_event_dev_stop() 1194 */ 1195 int rte_event_dev_stop_flush_callback_register(uint8_t dev_id, 1196 rte_eventdev_stop_flush_t callback, void *userdata); 1197 1198 /** 1199 * Close an event device. The device cannot be restarted! 1200 * 1201 * @param dev_id 1202 * Event device identifier 1203 * 1204 * @return 1205 * - 0 on successfully closing device 1206 * - <0 on failure to close device 1207 * - (-EAGAIN) if device is busy 1208 */ 1209 int 1210 rte_event_dev_close(uint8_t dev_id); 1211 1212 /** 1213 * Event vector structure. 1214 */ 1215 struct rte_event_vector { 1216 uint16_t nb_elem; 1217 /**< Number of elements valid in this event vector. */ 1218 uint16_t elem_offset : 12; 1219 /**< Offset into the vector array where valid elements start from. */ 1220 uint16_t rsvd : 3; 1221 /**< Reserved for future use */ 1222 uint16_t attr_valid : 1; 1223 /**< Indicates that the below union attributes have valid information. 1224 */ 1225 union { 1226 /* Used by Rx/Tx adapter. 1227 * Indicates that all the elements in this vector belong to the 1228 * same port and queue pair when originating from Rx adapter, 1229 * valid only when event type is ETHDEV_VECTOR or 1230 * ETH_RX_ADAPTER_VECTOR. 1231 * Can also be used to indicate the Tx adapter the destination 1232 * port and queue of the mbufs in the vector 1233 */ 1234 struct { 1235 uint16_t port; 1236 /* Ethernet device port id. */ 1237 uint16_t queue; 1238 /* Ethernet device queue id. */ 1239 }; 1240 }; 1241 /**< Union to hold common attributes of the vector array. */ 1242 uint64_t impl_opaque; 1243 1244 /* empty structures do not have zero size in C++ leading to compilation errors 1245 * with clang about structure having different sizes in C and C++. 1246 * Since these are all zero-sized arrays, we can omit the "union" wrapper for 1247 * C++ builds, removing the warning. 1248 */ 1249 #ifndef __cplusplus 1250 /**< Implementation specific opaque value. 1251 * An implementation may use this field to hold implementation specific 1252 * value to share between dequeue and enqueue operation. 1253 * The application should not modify this field. 1254 */ 1255 union { 1256 #endif 1257 struct rte_mbuf *mbufs[0]; 1258 void *ptrs[0]; 1259 uint64_t u64s[0]; 1260 #ifndef __cplusplus 1261 } __rte_aligned(16); 1262 #endif 1263 /**< Start of the vector array union. Depending upon the event type the 1264 * vector array can be an array of mbufs or pointers or opaque u64 1265 * values. 
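 *
 * A minimal iteration sketch for a dequeued mbuf vector (assuming the
 * event type has the RTE_EVENT_TYPE_VECTOR bit set):
 * \code{.c}
 *	struct rte_event_vector *vec = ev.vec;
 *	uint16_t i;
 *
 *	for (i = 0; i < vec->nb_elem; i++) {
 *		struct rte_mbuf *m = vec->mbufs[vec->elem_offset + i];
 *		// process m
 *	}
 * \endcode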
 */
} __rte_aligned(16);

/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED 0
/**< Ordered scheduling
 *
 * Events from an ordered flow of an event queue can be scheduled to multiple
 * ports for concurrent processing while maintaining the original event order.
 * This scheme enables the user to achieve high single flow throughput by
 * avoiding SW synchronization for ordering between ports which are bound to cores.
 *
 * The source flow ordering from an event queue is maintained when events are
 * enqueued to their destination queue within the same ordered flow context.
 * An event port holds the context until the application calls
 * rte_event_dequeue_burst() from the same port, which implicitly releases
 * the context.
 * The user may allow the scheduler to release the context earlier than that
 * by invoking rte_event_enqueue_burst() with the RTE_EVENT_OP_RELEASE operation.
 *
 * Events from the source queue appear in their original order when dequeued
 * from a destination queue.
 * Event ordering is based on the received event(s), but also other
 * (newly allocated or stored) events are ordered when enqueued within the same
 * ordered context. Events not enqueued (e.g. released or stored) within the
 * context are considered missing from reordering and are skipped at this time
 * (but can be ordered again within another context).
 *
 * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
 */

#define RTE_SCHED_TYPE_ATOMIC 1
/**< Atomic scheduling
 *
 * Events from an atomic flow of an event queue can be scheduled only to a
 * single port at a time. The port is guaranteed to have exclusive (atomic)
 * access to the associated flow context, which enables the user to avoid SW
 * synchronization. Atomic flows also help to maintain event ordering
 * since only one port at a time can process events from a flow of an
 * event queue.
 *
 * The atomic queue synchronization context is dedicated to the port until
 * the application calls rte_event_dequeue_burst() from the same port,
 * which implicitly releases the context. The user may allow the scheduler to
 * release the context earlier than that by invoking rte_event_enqueue_burst()
 * with the RTE_EVENT_OP_RELEASE operation.
 *
 * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
 */

#define RTE_SCHED_TYPE_PARALLEL 2
/**< Parallel scheduling
 *
 * The scheduler performs priority scheduling, load balancing, and similar
 * functions, but does not provide additional event synchronization or ordering.
 * It is free to schedule events from a single parallel flow of an event queue
 * to multiple event ports for concurrent processing.
 * The application is responsible for flow context synchronization and
 * event ordering (SW synchronization).
 *
 * @see rte_event_queue_setup(), rte_event_dequeue_burst()
 */

/* Event types to classify the event source */
#define RTE_EVENT_TYPE_ETHDEV 0x0
/**< The event generated from the ethdev subsystem */
#define RTE_EVENT_TYPE_CRYPTODEV 0x1
/**< The event generated from the cryptodev subsystem */
#define RTE_EVENT_TYPE_TIMER 0x2
/**< The event generated from the event timer adapter */
#define RTE_EVENT_TYPE_CPU 0x3
/**< The event generated from the CPU for pipelining.
 * The application may use *sub_event_type* to further classify the event.
 */
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
/**< The event generated from the event eth Rx adapter */
#define RTE_EVENT_TYPE_DMADEV 0x5
/**< The event generated from the dma subsystem */
#define RTE_EVENT_TYPE_VECTOR 0x8
/**< Indicates that the event is a vector.
 * All vector event types are a logical OR of RTE_EVENT_TYPE_VECTOR with the
 * base event type.
 * This simplifies the pipeline design as one can split processing between
 * vector events and normal events across event types.
 * Example:
 *	if (ev.event_type & RTE_EVENT_TYPE_VECTOR) {
 *		// Classify and handle vector event.
 *	} else {
 *		// Classify and handle event.
 *	}
 */
#define RTE_EVENT_TYPE_ETHDEV_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
/**< The event vector generated from the ethdev subsystem */
#define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
/**< The event vector generated from the CPU for pipelining. */
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
/**< The event vector generated from the eth Rx adapter. */
#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
/**< The event vector generated from the cryptodev adapter. */

#define RTE_EVENT_TYPE_MAX 0x10
/**< Maximum number of event types */

/* Event enqueue operations */
#define RTE_EVENT_OP_NEW 0
/**< Event producers use this operation to inject a new event into the
 * event device.
 */
#define RTE_EVENT_OP_FORWARD 1
/**< The CPU uses this operation to forward the event to a different event
 * queue, or to change it to a new application-specific flow or schedule type,
 * to enable pipelining.
 *
 * This operation must only be enqueued to the same port that the
 * event to be forwarded was dequeued from.
 */
#define RTE_EVENT_OP_RELEASE 2
/**< Release the flow context associated with the schedule type.
 *
 * If the current flow's schedule type is *RTE_SCHED_TYPE_ATOMIC*,
 * then this operation hints the scheduler that the user has completed critical
 * section processing in the current atomic context.
 * The scheduler is now allowed to schedule events from the same flow from
 * an event queue to another port. However, the context may still be held
 * until the next rte_event_dequeue_burst() call; this operation allows, but
 * does not force, the scheduler to release the context early.
 *
 * Early atomic context release may increase parallelism and thus system
 * performance, but the user needs to carefully design the split into critical
 * vs non-critical sections.
 *
 * If the current flow's schedule type is *RTE_SCHED_TYPE_ORDERED*,
 * then this operation hints the scheduler that the user has done all that is
 * needed to maintain event order in the current ordered context.
 * The scheduler is allowed to release the ordered context of this port and
 * avoid reordering any following enqueues.
 *
 * Early ordered context release may increase parallelism and thus system
 * performance.
 *
 * If the current flow's schedule type is *RTE_SCHED_TYPE_PARALLEL*,
 * or no scheduling context is held, then this operation may be a no-op,
 * depending on the implementation.
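 *
 * A minimal sketch of an explicit early release, reusing a previously
 * dequeued event *ev*:
 * \code{.c}
 *	ev.op = RTE_EVENT_OP_RELEASE;
 *	rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 * \endcode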
/**
 * The generic *rte_event* structure to hold the event attributes
 * for dequeue and enqueue operation
 */
struct rte_event {
	/** WORD0 */
	union {
		uint64_t event;
		/** Event attributes for dequeue or enqueue operation */
		struct {
			uint32_t flow_id:20;
			/**< Targeted flow identifier for the enqueue and
			 * dequeue operation.
			 * The value must be in the range of
			 * [0, nb_event_queue_flows - 1] which was
			 * previously supplied to rte_event_dev_configure().
			 */
			uint32_t sub_event_type:8;
			/**< Sub-event types based on the event source.
			 * @see RTE_EVENT_TYPE_CPU
			 */
			uint32_t event_type:4;
			/**< Event type to classify the event source.
			 * @see RTE_EVENT_TYPE_ETHDEV, (RTE_EVENT_TYPE_*)
			 */
			uint8_t op:2;
			/**< The type of event enqueue operation - new/forward/
			 * etc. This field is not preserved across an instance
			 * and is undefined on dequeue.
			 * @see RTE_EVENT_OP_NEW, (RTE_EVENT_OP_*)
			 */
			uint8_t rsvd:4;
			/**< Reserved for future use */
			uint8_t sched_type:2;
			/**< Scheduler synchronization type (RTE_SCHED_TYPE_*)
			 * associated with flow id on a given event queue
			 * for the enqueue and dequeue operation.
			 */
			uint8_t queue_id;
			/**< Targeted event queue identifier for the enqueue or
			 * dequeue operation.
			 * The value must be in the range of
			 * [0, nb_event_queues - 1] which was previously
			 * supplied to rte_event_dev_configure().
			 */
			uint8_t priority;
			/**< Event priority relative to other events in the
			 * event queue. The requested priority should be in the
			 * range of [RTE_EVENT_DEV_PRIORITY_HIGHEST,
			 * RTE_EVENT_DEV_PRIORITY_LOWEST].
			 * The implementation shall normalize the requested
			 * priority to supported priority value.
			 * Valid when the device has
			 * RTE_EVENT_DEV_CAP_EVENT_QOS capability.
			 */
			uint8_t impl_opaque;
			/**< Implementation specific opaque value.
			 * An implementation may use this field to hold
			 * implementation specific value to share between
			 * dequeue and enqueue operation.
			 * The application should not modify this field.
			 */
		};
	};
	/** WORD1 */
	union {
		uint64_t u64;
		/**< Opaque 64-bit value */
		void *event_ptr;
		/**< Opaque event pointer */
		struct rte_mbuf *mbuf;
		/**< mbuf pointer if dequeued event is associated with mbuf */
		struct rte_event_vector *vec;
		/**< Event vector pointer. */
	};
};
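/*
 * Example: populating a struct rte_event to inject new work. Illustrative
 * sketch only; the queue, flow and payload values (obj) are hypothetical.
 * C11 designated initializers can name the anonymous union/struct members
 * directly.
 *
 *	struct rte_event ev = {
 *		.op = RTE_EVENT_OP_NEW,
 *		.queue_id = 0,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.flow_id = 42,
 *		.event_type = RTE_EVENT_TYPE_CPU,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		.event_ptr = obj,	// application-owned payload pointer
 *	};
 *
 *	rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 */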
/* Ethdev Rx adapter capability bitmap flags */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1
/**< This flag is set when the packet transfer mechanism is in HW.
 * Ethdev can send packets to the event device using an internal event port.
 */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2
/**< Adapter supports multiple event queues per ethdev. Every ethdev
 * Rx queue can be connected to a unique event queue.
 */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4
/**< The application can override the adapter generated flow ID in the
 * event. This flow ID can be specified when adding an ethdev Rx queue
 * to the adapter using the ev.flow_id member.
 * @see struct rte_event_eth_rx_adapter_queue_conf::ev
 * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
 */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR 0x8
/**< Adapter supports event vectorization per ethdev. */

/**
 * Retrieve the event device's ethdev Rx adapter capabilities for the
 * specified ethernet port
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param eth_port_id
 *   The identifier of the ethernet device.
 *
 * @param[out] caps
 *   A pointer to memory filled with Rx event adapter capabilities.
 *
 * @return
 *   - 0: Success, driver provides Rx event adapter capabilities for the
 *	ethernet device.
 *   - <0: Error code returned by the driver function.
 */
int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				uint32_t *caps);
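/*
 * Example: checking whether an ethdev/eventdev pair can deliver packets
 * through an internal HW port, so no service core is needed for the Rx
 * adapter transfer. Illustrative sketch only.
 *
 *	uint32_t caps = 0;
 *	int internal_port = 0;
 *
 *	if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps) == 0)
 *		internal_port =
 *			!!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
 */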
#define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
/**< This flag is set when the timer mechanism is in HW. */

#define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC (1ULL << 1)
/**< This flag is set if periodic mode is supported. */

/**
 * Retrieve the event device's timer adapter capabilities.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param[out] caps
 *   A pointer to memory to be filled with event timer adapter capabilities.
 *
 * @return
 *   - 0: Success, driver provided event timer adapter capabilities.
 *   - <0: Error code returned by the driver function.
 */
int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);

/* Crypto adapter capability bitmap flags */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
/**< Flag indicates HW is capable of generating events in
 * RTE_EVENT_OP_NEW enqueue operation. Cryptodev will send
 * packets to the event device as new events using an internal
 * event port.
 */

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
/**< Flag indicates HW is capable of generating events in
 * RTE_EVENT_OP_FORWARD enqueue operation. Cryptodev will send
 * packets to the event device as forwarded events using an
 * internal event port.
 */

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4
/**< Flag indicates HW is capable of mapping crypto queue pair to
 * event queue.
 */

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8
/**< Flag indicates HW/SW supports a mechanism to store and retrieve
 * the private data information along with the crypto session.
 */

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR 0x10
/**< Flag indicates HW is capable of aggregating processed
 * crypto operations into rte_event_vector.
 */

/**
 * Retrieve the event device's crypto adapter capabilities for the
 * specified cryptodev device
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param cdev_id
 *   The identifier of the cryptodev device.
 *
 * @param[out] caps
 *   A pointer to memory filled with event adapter capabilities.
 *   It is expected to be pre-allocated & initialized by the caller.
 *
 * @return
 *   - 0: Success, driver provides event adapter capabilities for the
 *	cryptodev device.
 *   - <0: Error code returned by the driver function.
 */
int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
				  uint32_t *caps);

/* DMA adapter capability bitmap flags */
#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
/**< Flag indicates HW is capable of generating events in
 * RTE_EVENT_OP_NEW enqueue operation. DMADEV will send
 * packets to the event device as new events using an
 * internal event port.
 */

#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
/**< Flag indicates HW is capable of generating events in
 * RTE_EVENT_OP_FORWARD enqueue operation. DMADEV will send
 * packets to the event device as forwarded events using an
 * internal event port.
 */

#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4
/**< Flag indicates HW is capable of mapping DMA vchan to event queue. */

/**
 * Retrieve the event device's DMA adapter capabilities for the
 * specified dmadev device
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param dmadev_id
 *   The identifier of the dmadev device.
 *
 * @param[out] caps
 *   A pointer to memory filled with event adapter capabilities.
 *   It is expected to be pre-allocated & initialized by the caller.
 *
 * @return
 *   - 0: Success, driver provides event adapter capabilities for the
 *	dmadev device.
 *   - <0: Error code returned by the driver function.
 */
__rte_experimental
int
rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);

/* Ethdev Tx adapter capability bitmap flags */
#define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1
/**< This flag is set when the PMD supports a packet transmit callback
 */
#define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR 0x2
/**< Indicates that the Tx adapter is capable of handling event vector of
 * mbufs.
 */

/**
 * Retrieve the event device's eth Tx adapter capabilities
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param eth_port_id
 *   The identifier of the ethernet device.
 *
 * @param[out] caps
 *   A pointer to memory filled with eth Tx adapter capabilities.
 *
 * @return
 *   - 0: Success, driver provides eth Tx adapter capabilities.
 *   - <0: Error code returned by the driver function.
 */
int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				uint32_t *caps);
/**
 * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
 *
 * If the device is configured with the RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
 * flag, then the application can use this function to convert a timeout value
 * in nanoseconds to the implementation-specific timeout value supplied in
 * rte_event_dequeue_burst()
 *
 * @param dev_id
 *   The identifier of the device.
 * @param ns
 *   Wait time in nanoseconds
 * @param[out] timeout_ticks
 *   Value for the *timeout_ticks* parameter in rte_event_dequeue_burst()
 *
 * @return
 *  - 0 on success.
 *  - -ENOTSUP if the device doesn't support timeouts
 *  - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
 *  - other values < 0 on failure.
 *
 * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
 * @see rte_event_dev_configure()
 */
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				uint64_t *timeout_ticks);
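/*
 * Example: converting a 100 us dequeue timeout to device ticks and using it.
 * Illustrative sketch only; assumes the device was configured with
 * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT, and evs/nb are hypothetical locals.
 *
 *	struct rte_event evs[16];
 *	uint64_t ticks = 0;
 *	uint16_t nb;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) == 0)
 *		nb = rte_event_dequeue_burst(dev_id, port_id, evs,
 *					     RTE_DIM(evs), ticks);
 */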
/**
 * Link multiple source event queues supplied in *queues* to the destination
 * event port designated by its *port_id* with associated service priorities
 * supplied in *priorities* on the event device designated by its *dev_id*.
 *
 * The link establishment shall enable the event port *port_id* to receive
 * events from the specified event queue(s) supplied in *queues*
 *
 * An event queue may link to one or more event ports.
 * The number of links that can be established from an event queue to an event
 * port is implementation-defined.
 *
 * Event queue(s) to event port link establishment can be changed at runtime
 * without re-configuring the device to support scaling and to reduce the
 * latency of critical work by establishing the link with more event ports
 * at runtime.
 *
 * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
 * than or equal to one, this function links the event queues to the default
 * profile_id i.e. profile_id 0 of the event port.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param port_id
 *   Event port identifier to select the destination port to link.
 *
 * @param queues
 *   Points to an array of *nb_links* event queues to be linked
 *   to the event port.
 *   NULL value is allowed, in which case this function links all the configured
 *   event queues *nb_event_queues* which were previously supplied to
 *   rte_event_dev_configure() to the event port *port_id*
 *
 * @param priorities
 *   Points to an array of *nb_links* service priorities associated with each
 *   event queue link to event port.
 *   The priority defines the event port's servicing priority for
 *   event queue, which may be ignored by an implementation.
 *   The requested priority should be in the range of
 *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
 *   The implementation shall normalize the requested priority to
 *   implementation supported priority value.
 *   NULL value is allowed, in which case this function links the event queues
 *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
 *
 * @param nb_links
 *   The number of links to establish. This parameter is ignored if queues is
 *   NULL.
 *
 * @return
 *   The number of links actually established. The return value can be less than
 *   the value of the *nb_links* parameter when the implementation has a
 *   limitation on specific queue to port link establishment or if invalid
 *   parameters are specified in *queues*.
 *   If the return value is less than *nb_links*, the remaining links at the end
 *   of link[] are not established, and the caller has to take care of them.
 *   If the return value is less than *nb_links*, then the implementation shall
 *   update rte_errno accordingly. Possible rte_errno values are
 *   (EDQUOT) Quota exceeded (the application tried to link a queue configured
 *   with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
 *   (EINVAL) Invalid parameter
 */
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links);

/**
 * Unlink multiple source event queues supplied in *queues* from the destination
 * event port designated by its *port_id* on the event device designated
 * by its *dev_id*.
 *
 * The unlink call issues an async request to disable the event port *port_id*
 * from receiving events from the specified event queue(s).
 * Event queue(s) to event port unlinks can be changed at runtime
 * without re-configuring the device.
 *
 * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
 * than or equal to one, this function unlinks the event queues from the default
 * profile identifier i.e. profile 0 of the event port.
 *
 * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param port_id
 *   Event port identifier to select the destination port to unlink.
 *
 * @param queues
 *   Points to an array of *nb_unlinks* event queues to be unlinked
 *   from the event port.
 *   NULL value is allowed, in which case this function unlinks all the
 *   event queue(s) from the event port *port_id*.
 *
 * @param nb_unlinks
 *   The number of unlinks to request. This parameter is ignored if queues is
 *   NULL.
 *
 * @return
 *   The number of unlinks successfully requested. The return value can be less
 *   than the value of the *nb_unlinks* parameter when the implementation has a
 *   limitation on specific queue to port unlink establishment or
 *   if invalid parameters are specified.
 *   If the return value is less than *nb_unlinks*, the remaining queues at the
 *   end of queues[] are not unlinked, and the caller has to take care of them.
 *   If the return value is less than *nb_unlinks*, then the implementation
 *   shall update rte_errno accordingly. Possible rte_errno values are
 *   (EINVAL) Invalid parameter
 */
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks);
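/*
 * Example: linking two queues to a worker port with distinct servicing
 * priorities and verifying that both links were established. Illustrative
 * sketch only; the queue identifiers are hypothetical.
 *
 *	const uint8_t queues[] = { 0, 1 };
 *	const uint8_t prios[] = {
 *		RTE_EVENT_DEV_PRIORITY_HIGHEST,
 *		RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *
 *	if (rte_event_port_link(dev_id, port_id, queues, prios, 2) != 2)
 *		rte_panic("queue->port linking failed: %d\n", rte_errno);
 */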
/**
 * Link multiple source event queues supplied in *queues* to the destination
 * event port designated by its *port_id*, with the associated profile
 * identifier supplied in *profile_id* and the service priorities supplied in
 * *priorities*, on the event device designated by its *dev_id*.
 *
 * If *profile_id* is set to 0, the links created by the call
 * ``rte_event_port_link()`` will be overwritten.
 *
 * Event ports by default use profile_id 0 unless it is changed using the
 * call ``rte_event_port_profile_switch()``.
 *
 * The link establishment shall enable the event port *port_id* to receive
 * events from the specified event queue(s) supplied in *queues*
 *
 * An event queue may link to one or more event ports.
 * The number of links that can be established from an event queue to an event
 * port is implementation-defined.
 *
 * Event queue(s) to event port link establishment can be changed at runtime
 * without re-configuring the device to support scaling and to reduce the
 * latency of critical work by establishing the link with more event ports
 * at runtime.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param port_id
 *   Event port identifier to select the destination port to link.
 *
 * @param queues
 *   Points to an array of *nb_links* event queues to be linked
 *   to the event port.
 *   NULL value is allowed, in which case this function links all the configured
 *   event queues *nb_event_queues* which were previously supplied to
 *   rte_event_dev_configure() to the event port *port_id*
 *
 * @param priorities
 *   Points to an array of *nb_links* service priorities associated with each
 *   event queue link to event port.
 *   The priority defines the event port's servicing priority for
 *   event queue, which may be ignored by an implementation.
 *   The requested priority should be in the range of
 *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
 *   The implementation shall normalize the requested priority to
 *   implementation supported priority value.
 *   NULL value is allowed, in which case this function links the event queues
 *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
 *
 * @param nb_links
 *   The number of links to establish. This parameter is ignored if queues is
 *   NULL.
 *
 * @param profile_id
 *   The profile identifier associated with the links between event queues and
 *   event port. Should be less than the max capability reported by
 *   ``rte_event_dev_info::max_profiles_per_port``
 *
 * @return
 *   The number of links actually established. The return value can be less than
 *   the value of the *nb_links* parameter when the implementation has a
 *   limitation on specific queue to port link establishment or if invalid
 *   parameters are specified in *queues*.
 *   If the return value is less than *nb_links*, the remaining links at the end
 *   of link[] are not established, and the caller has to take care of them.
 *   If the return value is less than *nb_links*, then the implementation shall
 *   update rte_errno accordingly. Possible rte_errno values are
 *   (EDQUOT) Quota exceeded (the application tried to link a queue configured
 *   with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
 *   (EINVAL) Invalid parameter
 */
__rte_experimental
int
rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
				 const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);
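/*
 * Example: maintaining two link profiles on one port and switching between
 * them at runtime. Illustrative sketch only; the queue lists are hypothetical
 * and the device must report rte_event_dev_info::max_profiles_per_port >= 2.
 *
 *	const uint8_t fast_path_queues[] = { 0, 1 };
 *	const uint8_t slow_path_queues[] = { 2 };
 *
 *	rte_event_port_profile_links_set(dev_id, port_id, fast_path_queues,
 *					 NULL, 2, 0);
 *	rte_event_port_profile_links_set(dev_id, port_id, slow_path_queues,
 *					 NULL, 1, 1);
 *
 *	// Later, dequeue only from the slow-path queues:
 *	rte_event_port_profile_switch(dev_id, port_id, 1);
 */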
/**
 * Unlink multiple source event queues supplied in *queues*, that belong to the
 * profile designated by *profile_id*, from the destination event port
 * designated by its *port_id* on the event device designated by its *dev_id*.
 *
 * If *profile_id* is set to 0, i.e. the default profile, then this function
 * will act as ``rte_event_port_unlink``.
 *
 * The unlink call issues an async request to disable the event port *port_id*
 * from receiving events from the specified event queue(s).
 * Event queue(s) to event port unlinks can be changed at runtime
 * without re-configuring the device.
 *
 * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param port_id
 *   Event port identifier to select the destination port to unlink.
 *
 * @param queues
 *   Points to an array of *nb_unlinks* event queues to be unlinked
 *   from the event port.
 *   NULL value is allowed, in which case this function unlinks all the
 *   event queue(s) from the event port *port_id*.
 *
 * @param nb_unlinks
 *   The number of unlinks to request. This parameter is ignored if queues is
 *   NULL.
 *
 * @param profile_id
 *   The profile identifier associated with the links between event queues and
 *   event port. Should be less than the max capability reported by
 *   ``rte_event_dev_info::max_profiles_per_port``
 *
 * @return
 *   The number of unlinks successfully requested. The return value can be less
 *   than the value of the *nb_unlinks* parameter when the implementation has a
 *   limitation on specific queue to port unlink establishment or
 *   if invalid parameters are specified.
 *   If the return value is less than *nb_unlinks*, the remaining queues at the
 *   end of queues[] are not unlinked, and the caller has to take care of them.
 *   If the return value is less than *nb_unlinks*, then the implementation
 *   shall update rte_errno accordingly. Possible rte_errno values are
 *   (EINVAL) Invalid parameter
 */
__rte_experimental
int
rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
			      uint16_t nb_unlinks, uint8_t profile_id);

/**
 * Returns the number of unlinks in progress.
 *
 * This function provides the application with a method to detect when an
 * unlink has been completed by the implementation.
 *
 * @see rte_event_port_unlink() to issue unlink requests.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param port_id
 *   Event port identifier to select port to check for unlinks in progress.
 *
 * @return
 *   The number of unlinks that are in progress. A return of zero indicates that
 *   there are no outstanding unlink requests. A positive return value indicates
 *   the number of unlinks that are in progress, but are not yet complete.
 *   A negative return value indicates an error, -EINVAL indicates an invalid
 *   parameter passed for *dev_id* or *port_id*.
 */
int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);
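/*
 * Example: requesting an unlink and waiting until it has completed before
 * tearing the queue down. Illustrative sketch only; the queue identifier is
 * hypothetical.
 *
 *	uint8_t queue = 2;
 *
 *	if (rte_event_port_unlink(dev_id, port_id, &queue, 1) == 1)
 *		while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *			rte_pause();
 */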
/**
 * Retrieve the list of source event queues and their associated service
 * priorities linked to the destination event port designated by its *port_id*
 * on the event device designated by its *dev_id*.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param port_id
 *   Event port identifier.
 *
 * @param[out] queues
 *   Points to an array of *queues* for output.
 *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
 *   store the event queue(s) linked with event port *port_id*
 *
 * @param[out] priorities
 *   Points to an array of *priorities* for output.
 *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
 *   store the service priority associated with each event queue linked
 *
 * @return
 *   The number of links established on the event port designated by its
 *   *port_id*.
 *   - <0 on failure.
 */
int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[]);
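/*
 * Example: inspecting the current links of a port. Illustrative sketch only;
 * error handling is omitted.
 *
 *	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
 *	uint8_t prios[RTE_EVENT_MAX_QUEUES_PER_DEV];
 *	int i, n;
 *
 *	n = rte_event_port_links_get(dev_id, port_id, queues, prios);
 *	for (i = 0; i < n; i++)
 *		printf("queue %u -> port %u (priority %u)\n",
 *		       queues[i], port_id, prios[i]);
 */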
/**
 * Retrieve the list of source event queues and their service priorities
 * associated with a *profile_id* and linked to the destination event port
 * designated by its *port_id* on the event device designated by its *dev_id*.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param port_id
 *   Event port identifier.
 *
 * @param[out] queues
 *   Points to an array of *queues* for output.
 *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
 *   store the event queue(s) linked with event port *port_id*
 *
 * @param[out] priorities
 *   Points to an array of *priorities* for output.
 *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
 *   store the service priority associated with each event queue linked
 *
 * @param profile_id
 *   The profile identifier associated with the links between event queues and
 *   event port. Should be less than the max capability reported by
 *   ``rte_event_dev_info::max_profiles_per_port``
 *
 * @return
 *   The number of links established on the event port designated by its
 *   *port_id*.
 *   - <0 on failure.
 */
__rte_experimental
int
rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
				 uint8_t priorities[], uint8_t profile_id);

/**
 * Retrieve the service ID of the event dev. If the event dev doesn't use
 * an rte_service function, this function returns -ESRCH.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param [out] service_id
 *   A pointer to a uint32_t, to be filled in with the service id.
 *
 * @return
 *   - 0: Success
 *   - <0: Error code on failure; if the event dev doesn't use an rte_service
 *   function, this function returns -ESRCH.
 */
int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);

/**
 * Dump internal information about *dev_id* to the FILE* provided in *f*.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param f
 *   A pointer to a file for output
 *
 * @return
 *   - 0: on success
 *   - <0: on failure.
 */
int
rte_event_dev_dump(uint8_t dev_id, FILE *f);

/** Maximum name length for extended statistics counters */
#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64

/**
 * Selects the component of the eventdev to retrieve statistics from.
 */
enum rte_event_dev_xstats_mode {
	RTE_EVENT_DEV_XSTATS_DEVICE,
	RTE_EVENT_DEV_XSTATS_PORT,
	RTE_EVENT_DEV_XSTATS_QUEUE,
};

/**
 * A name-key lookup element for extended statistics.
 *
 * This structure is used to map between names and ID numbers
 * for extended eventdev statistics.
 */
struct rte_event_dev_xstats_name {
	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
};

/**
 * Retrieve names of extended statistics of an event device.
 *
 * @param dev_id
 *   The identifier of the event device.
 * @param mode
 *   The mode of statistics to retrieve. Choices include the device statistics,
 *   port statistics or queue statistics.
 * @param queue_port_id
 *   Used to specify the port or queue number in queue or port mode, and is
 *   ignored in device mode.
 * @param[out] xstats_names
 *   Block of memory to insert names into. Must be at least size in capacity.
 *   If set to NULL, function returns required capacity.
 * @param[out] ids
 *   Block of memory to insert ids into. Must be at least size in capacity.
 *   If set to NULL, function returns required capacity. The id values returned
 *   can be passed to *rte_event_dev_xstats_get* to select statistics.
 * @param size
 *   Capacity of xstats_names (number of names).
 * @return
 *   - positive value lower or equal to size: success. The return value
 *     is the number of entries filled in the stats table.
 *   - positive value higher than size: error, the given statistics table
 *     is too small. The return value corresponds to the size that should
 *     be given to succeed. The entries in the table are not valid and
 *     shall not be used by the caller.
 *   - negative value on error:
 *        -ENODEV for invalid *dev_id*
 *        -EINVAL for invalid mode, queue port or id parameters
 *        -ENOTSUP if the device doesn't support this function.
 */
int
rte_event_dev_xstats_names_get(uint8_t dev_id,
			       enum rte_event_dev_xstats_mode mode,
			       uint8_t queue_port_id,
			       struct rte_event_dev_xstats_name *xstats_names,
			       uint64_t *ids,
			       unsigned int size);

/**
 * Retrieve extended statistics of an event device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param mode
 *   The mode of statistics to retrieve. Choices include the device statistics,
 *   port statistics or queue statistics.
 * @param queue_port_id
 *   Used to specify the port or queue number in queue or port mode, and is
 *   ignored in device mode.
 * @param ids
 *   The id numbers of the stats to get. The ids can be obtained from the stat
 *   position in the stat list from rte_event_dev_xstats_names_get(), or
 *   by using rte_event_dev_xstats_by_name_get().
 * @param[out] values
 *   The values for each stat requested by ID.
 * @param n
 *   The number of stats requested
 * @return
 *   - positive value: number of stat entries filled into the values array
 *   - negative value on error:
 *        -ENODEV for invalid *dev_id*
 *        -EINVAL for invalid mode, queue port or id parameters
 *        -ENOTSUP if the device doesn't support this function.
 */
int
rte_event_dev_xstats_get(uint8_t dev_id,
			 enum rte_event_dev_xstats_mode mode,
			 uint8_t queue_port_id,
			 const uint64_t ids[],
			 uint64_t values[], unsigned int n);

/**
 * Retrieve the value of a single stat by requesting it by name.
 *
 * @param dev_id
 *   The identifier of the device
 * @param name
 *   The stat name to retrieve
 * @param[out] id
 *   If non-NULL, the numerical id of the stat will be returned, so that further
 *   requests for the stat can be made using rte_event_dev_xstats_get(), which
 *   will be faster as it doesn't need to scan a list of names for the stat.
 *   If the stat cannot be found, the id returned will be (unsigned)-1.
 * @return
 *   - positive value or zero: the stat value
 *   - negative value: -EINVAL if stat not found, -ENOTSUP if not supported.
 */
uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
				 uint64_t *id);
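/*
 * Example: the two-call pattern for reading all device-level xstats, first
 * querying the required capacity with NULL output arrays, then fetching
 * names, ids and values. Illustrative sketch only; error handling and the
 * matching free() calls are omitted.
 *
 *	int n = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *	struct rte_event_dev_xstats_name *names = malloc(n * sizeof(*names));
 *	uint64_t *ids = malloc(n * sizeof(*ids));
 *	uint64_t *values = malloc(n * sizeof(*values));
 *
 *	rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				       names, ids, n);
 *	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				 ids, values, n);
 */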
/**
 * Reset the values of the xstats of the selected component in the device.
 *
 * @param dev_id
 *   The identifier of the device
 * @param mode
 *   The mode of the statistics to reset. Choose from device, queue or port.
 * @param queue_port_id
 *   The queue or port to reset. 0 and positive values select ports and queues,
 *   while -1 indicates all ports or queues.
 * @param ids
 *   Selects specific statistics to be reset. When NULL, all statistics selected
 *   by *mode* will be reset. If non-NULL, must point to an array of at least
 *   *nb_ids* size.
 * @param nb_ids
 *   The number of ids available from the *ids* array. Ignored when ids is NULL.
 * @return
 *   - zero: successfully reset the statistics to zero
 *   - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported.
 */
int
rte_event_dev_xstats_reset(uint8_t dev_id,
			   enum rte_event_dev_xstats_mode mode,
			   int16_t queue_port_id,
			   const uint64_t ids[],
			   uint32_t nb_ids);

/**
 * Trigger the eventdev self test.
 *
 * @param dev_id
 *   The identifier of the device
 * @return
 *   - 0: Selftest successful
 *   - -ENOTSUP if the device doesn't support selftest
 *   - other values < 0 on failure.
 */
int rte_event_dev_selftest(uint8_t dev_id);

/**
 * Get the memory required per event vector based on the number of elements per
 * vector.
 * This should be used to create the mempool that holds the event vectors.
 *
 * @param name
 *   The name of the vector pool.
 * @param n
 *   The number of elements in the event vector pool.
 * @param cache_size
 *   Size of the per-core object cache. See rte_mempool_create() for
 *   details.
 * @param nb_elem
 *   The number of elements that a single event vector should be able to hold.
 * @param socket_id
 *   The socket identifier where the memory should be allocated. The
 *   value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
 *   reserved zone
 *
 * @return
 *   The pointer to the newly allocated mempool, on success. NULL on error
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *    - E_RTE_SECONDARY - function was called from a secondary process instance
 *    - EINVAL - cache size provided is too large, or priv_size is not aligned.
 *    - ENOSPC - the maximum number of memzones has already been allocated
 *    - EEXIST - a memzone with the same name already exists
 *    - ENOMEM - no appropriate memory area found in which to create memzone
 *    - ENAMETOOLONG - mempool name requested is too long.
 */
struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
			     unsigned int cache_size, uint16_t nb_elem,
			     int socket_id);
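/*
 * Example: creating a mempool of event vectors, each able to hold up to 32
 * packets. Illustrative sketch only; the pool name and sizing are
 * hypothetical.
 *
 *	struct rte_mempool *vec_pool;
 *
 *	vec_pool = rte_event_vector_pool_create("evt_vec_pool", 16384, 0, 32,
 *						SOCKET_ID_ANY);
 *	if (vec_pool == NULL)
 *		rte_panic("event vector pool creation failed: %d\n", rte_errno);
 */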
#include <rte_eventdev_core.h>

static __rte_always_inline uint16_t
__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
			  const struct rte_event ev[], uint16_t nb_events,
			  const event_enqueue_burst_t fn)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
	/*
	 * Allow zero-cost non-burst mode routine invocation if the application
	 * requests nb_events as a compile-time constant one.
	 */
	if (nb_events == 1)
		return (fp_ops->enqueue)(port, ev);
	else
		return fn(port, ev, nb_events);
}

/**
 * Enqueue a burst of event objects or a single event object supplied in the
 * *rte_event* structure on an event device designated by its *dev_id* through
 * the event port specified by *port_id*. Each event object specifies the event
 * queue on which it will be enqueued.
 *
 * The *nb_events* parameter is the number of event objects to enqueue, which
 * are supplied in the *ev* array of *rte_event* structure.
 *
 * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
 * enqueued to the same port that their associated events were dequeued from.
 *
 * The rte_event_enqueue_burst() function returns the number of
 * event objects it actually enqueued. A return value equal to *nb_events*
 * means that all event objects have been enqueued.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param ev
 *   Points to an array of *nb_events* objects of type *rte_event* structure
 *   which contain the event object enqueue operations to be processed.
 * @param nb_events
 *   The number of event objects to enqueue, typically the number returned by
 *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
 *   available for this port.
 *
 * @return
 *   The number of event objects actually enqueued on the event device. The
 *   return value can be less than the value of the *nb_events* parameter when
 *   the event device's queue is full or if invalid parameters are specified in
 *   a *rte_event*. If the return value is less than *nb_events*, the remaining
 *   events at the end of ev[] are not consumed and the caller has to take care
 *   of them, and rte_errno is set accordingly. Possible errno values include:
 *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
 *              ID is invalid, or an event's sched type doesn't match the
 *              capabilities of the destination queue.
 *   - ENOSPC   The event port was backpressured and unable to enqueue
 *              one or more events. This error code is only applicable to
 *              closed systems.
 * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
 */
static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
			const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
					 fp_ops->enqueue_burst);
}
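/*
 * Example: retrying a partial enqueue until the whole burst is accepted.
 * Illustrative sketch only; events/nb_events are hypothetical, and a real
 * application would typically bound the retries or drop events on persistent
 * backpressure rather than spin indefinitely.
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < nb_events)
 *		sent += rte_event_enqueue_burst(dev_id, port_id,
 *						&events[sent],
 *						nb_events - sent);
 */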
/**
 * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_NEW* on
 * an event device designated by its *dev_id* through the event port specified
 * by *port_id*.
 *
 * Provides the same functionality as rte_event_enqueue_burst(), except that
 * the application can use this API when all the objects in the burst contain
 * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
 * function can provide an additional hint to the PMD and optimize if possible.
 *
 * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
 * has an event object of operation type != RTE_EVENT_OP_NEW.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param ev
 *   Points to an array of *nb_events* objects of type *rte_event* structure
 *   which contain the event object enqueue operations to be processed.
 * @param nb_events
 *   The number of event objects to enqueue, typically the number returned by
 *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
 *   available for this port.
 *
 * @return
 *   The number of event objects actually enqueued on the event device. The
 *   return value can be less than the value of the *nb_events* parameter when
 *   the event device's queue is full or if invalid parameters are specified in
 *   a *rte_event*. If the return value is less than *nb_events*, the remaining
 *   events at the end of ev[] are not consumed and the caller has to take care
 *   of them, and rte_errno is set accordingly. Possible errno values include:
 *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
 *              ID is invalid, or an event's sched type doesn't match the
 *              capabilities of the destination queue.
 *   - ENOSPC   The event port was backpressured and unable to enqueue
 *              one or more events. This error code is only applicable to
 *              closed systems.
 * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
 * @see rte_event_enqueue_burst()
 */
static inline uint16_t
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
			    const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
					 fp_ops->enqueue_new_burst);
}
/**
 * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_FORWARD*
 * on an event device designated by its *dev_id* through the event port
 * specified by *port_id*.
 *
 * Provides the same functionality as rte_event_enqueue_burst(), except that
 * the application can use this API when all the objects in the burst contain
 * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
 * function can provide an additional hint to the PMD and optimize if possible.
 *
 * The rte_event_enqueue_forward_burst() result is undefined if the enqueue
 * burst has an event object of operation type != RTE_EVENT_OP_FORWARD.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param ev
 *   Points to an array of *nb_events* objects of type *rte_event* structure
 *   which contain the event object enqueue operations to be processed.
 * @param nb_events
 *   The number of event objects to enqueue, typically the number returned by
 *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
 *   available for this port.
 *
 * @return
 *   The number of event objects actually enqueued on the event device. The
 *   return value can be less than the value of the *nb_events* parameter when
 *   the event device's queue is full or if invalid parameters are specified in
 *   a *rte_event*. If the return value is less than *nb_events*, the remaining
 *   events at the end of ev[] are not consumed and the caller has to take care
 *   of them, and rte_errno is set accordingly. Possible errno values include:
 *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
 *              ID is invalid, or an event's sched type doesn't match the
 *              capabilities of the destination queue.
 *   - ENOSPC   The event port was backpressured and unable to enqueue
 *              one or more events. This error code is only applicable to
 *              closed systems.
 * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
 * @see rte_event_enqueue_burst()
 */
static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
				const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
					 fp_ops->enqueue_forward_burst);
}
/**
 * Dequeue a burst of event objects or a single event object from the event
 * port designated by its *event_port_id*, on an event device designated
 * by its *dev_id*.
 *
 * rte_event_dequeue_burst() does not dictate the specifics of the scheduling
 * algorithm, as each eventdev driver may have different criteria to schedule
 * an event. However, in general, from an application perspective the scheduler
 * may use the following scheme to dispatch an event to the port.
 *
 * 1) Selection of event queue based on
 *   a) The list of event queues linked to the event port.
 *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability, then event
 *      queue selection from the list is based on event queue priority relative
 *      to other event queues supplied as *priority* in rte_event_queue_setup()
 *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability, then event
 *      queue selection from the list is based on event priority supplied as
 *      *priority* in rte_event_enqueue_burst()
 * 2) Selection of event
 *   a) The number of flows available in the selected event queue.
 *   b) The schedule type method associated with the event
 *
 * The *nb_events* parameter is the maximum number of event objects to dequeue,
 * which are returned in the *ev* array of *rte_event* structure.
 *
 * The rte_event_dequeue_burst() function returns the number of event objects
 * it actually dequeued. A return value equal to *nb_events* means that all
 * event objects have been dequeued.
 *
 * The number of events dequeued is the number of scheduler contexts held by
 * this port. These contexts are automatically released in the next
 * rte_event_dequeue_burst() invocation if the port supports implicit
 * releases; alternatively, invoking rte_event_enqueue_burst() with the
 * RTE_EVENT_OP_RELEASE operation can be used to release the contexts early.
 *
 * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
 * enqueued to the same port that their associated events were dequeued from.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param[out] ev
 *   Points to an array of *nb_events* objects of type *rte_event* structure
 *   for output to be populated with the dequeued event objects.
 * @param nb_events
 *   The maximum number of event objects to dequeue, typically the number
 *   returned by rte_event_port_dequeue_depth() available for this port.
 *
 * @param timeout_ticks
 *   - 0 no-wait, returns immediately if there is no event.
 *   - >0 wait for the event. If the device is configured with
 *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT, then this function will wait until
 *   at least one event is available or *timeout_ticks* time has elapsed.
 *   If the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT,
 *   then this function will wait until an event is available or the
 *   *dequeue_timeout_ns* ns which was previously supplied to
 *   rte_event_dev_configure() has elapsed.
 *
 * @return
 *   The number of event objects actually dequeued from the port. The return
 *   value can be less than the value of the *nb_events* parameter when the
 *   event port's queue is not full.
 *
 * @see rte_event_port_dequeue_depth()
 */
static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
			uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
	/*
	 * Allow zero-cost non-burst mode routine invocation if the application
	 * requests nb_events as a compile-time constant one.
	 */
	if (nb_events == 1)
		return (fp_ops->dequeue)(port, ev, timeout_ticks);
	else
		return (fp_ops->dequeue_burst)(port, ev, nb_events,
					       timeout_ticks);
}
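/*
 * Example: a minimal worker loop that dequeues a burst, processes each event
 * and forwards it to the next pipeline stage. Illustrative sketch only; done,
 * timeout, process_event() and NEXT_STAGE_QUEUE_ID are hypothetical.
 *
 *	struct rte_event evs[32];
 *	uint16_t i, nb;
 *
 *	while (!done) {
 *		nb = rte_event_dequeue_burst(dev_id, port_id, evs, 32, timeout);
 *		for (i = 0; i < nb; i++) {
 *			process_event(&evs[i]);
 *			evs[i].queue_id = NEXT_STAGE_QUEUE_ID;
 *			evs[i].op = RTE_EVENT_OP_FORWARD;
 *		}
 *		if (nb > 0)
 *			rte_event_enqueue_burst(dev_id, port_id, evs, nb);
 *	}
 */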
#define RTE_EVENT_DEV_MAINT_OP_FLUSH (1 << 0)
/**< Force an immediate flush of any buffered events in the port,
 * potentially at the cost of additional overhead.
 *
 * @see rte_event_maintain()
 */

/**
 * Maintain an event device.
 *
 * This function is only relevant for event devices which do not have
 * the @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE flag set. Such devices
 * require an application thread using a particular port to
 * periodically call rte_event_maintain() on that port during periods
 * in which it is neither attempting to enqueue events to nor dequeue
 * events from the port. rte_event_maintain() is a low-overhead
 * function and should be called at a high rate (e.g., in the
 * application's poll loop).
 *
 * No port may be left unmaintained.
 *
 * At the application thread's convenience, rte_event_maintain() may
 * (but is not required to) be called even during periods when enqueue
 * or dequeue functions are being called, at the cost of a slight
 * increase in overhead.
 *
 * rte_event_maintain() may be called on event devices which have set
 * @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, in which case it is a
 * no-operation.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param op
 *   0, or @ref RTE_EVENT_DEV_MAINT_OP_FLUSH.
 * @return
 *  - 0 on success.
 *  - -EINVAL if *dev_id*, *port_id*, or *op* is invalid.
 *
 * @see RTE_EVENT_DEV_CAP_MAINTENANCE_FREE
 */
static inline int
rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;

	if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
		return -EINVAL;
#endif
	rte_eventdev_trace_maintain(dev_id, port_id, op);

	if (fp_ops->maintain != NULL)
		fp_ops->maintain(port, op);

	return 0;
}

/**
 * Change the active profile on an event port.
 *
 * This function is used to change the current active profile on an event port
 * when multiple link profiles are configured on an event port through the
 * function call ``rte_event_port_profile_links_set``.
 *
 * On the subsequent ``rte_event_dequeue_burst`` call, only the event queues
 * that were associated with the newly active profile will participate in
 * scheduling.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param profile_id
 *   The identifier of the profile.
 * @return
 *  - 0 on success.
 *  - -EINVAL if *dev_id*, *port_id*, or *profile_id* is invalid.
 */
static inline int
rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;

	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
		return -EINVAL;
#endif
	rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);

	return fp_ops->profile_switch(port, profile_id);
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENTDEV_H_ */