1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2016 Cavium, Inc. 3 * Copyright(c) 2016-2018 Intel Corporation. 4 * Copyright 2016 NXP 5 * All rights reserved. 6 */ 7 8 #ifndef _RTE_EVENTDEV_H_ 9 #define _RTE_EVENTDEV_H_ 10 11 /** 12 * @file 13 * 14 * RTE Event Device API 15 * ==================== 16 * 17 * In a traditional DPDK application model, the application polls Ethdev port RX 18 * queues to look for work, and processing is done in a run-to-completion manner, 19 * after which the packets are transmitted on a Ethdev TX queue. Load is 20 * distributed by statically assigning ports and queues to lcores, and NIC 21 * receive-side scaling (RSS), or similar, is employed to distribute network flows 22 * (and thus work) on the same port across multiple RX queues. 23 * 24 * In contrast, in an event-driven model, as supported by this "eventdev" library, 25 * incoming packets (or other input events) are fed into an event device, which 26 * schedules those packets across the available lcores, in accordance with its configuration. 27 * This event-driven programming model offers applications automatic multicore scaling, 28 * dynamic load balancing, pipelining, packet order maintenance, synchronization, 29 * and prioritization/quality of service. 30 * 31 * The Event Device API is composed of two parts: 32 * 33 * - The application-oriented Event API that includes functions to setup 34 * an event device (configure it, setup its queues, ports and start it), to 35 * establish the links between queues and ports to receive events, and so on. 36 * 37 * - The driver-oriented Event API that exports a function allowing 38 * an event poll Mode Driver (PMD) to register itself as 39 * an event device driver. 40 * 41 * Application-oriented Event API 42 * ------------------------------ 43 * 44 * Event device components: 45 * 46 * +-----------------+ 47 * | +-------------+ | 48 * +-------+ | | flow 0 | | 49 * |Packet | | +-------------+ | 50 * |event | | +-------------+ | 51 * | | | | flow 1 | |port_link(port0, queue0) 52 * +-------+ | +-------------+ | | +--------+ 53 * +-------+ | +-------------+ o-----v-----o |dequeue +------+ 54 * |Crypto | | | flow n | | | event +------->|Core 0| 55 * |work | | +-------------+ o----+ | port 0 | | | 56 * |done ev| | event queue 0 | | +--------+ +------+ 57 * +-------+ +-----------------+ | 58 * +-------+ | 59 * |Timer | +-----------------+ | +--------+ 60 * |expiry | | +-------------+ | +------o |dequeue +------+ 61 * |event | | | flow 0 | o-----------o event +------->|Core 1| 62 * +-------+ | +-------------+ | +----o port 1 | | | 63 * Event enqueue | +-------------+ | | +--------+ +------+ 64 * o-------------> | | flow 1 | | | 65 * enqueue( | +-------------+ | | 66 * queue_id, | | | +--------+ +------+ 67 * flow_id, | +-------------+ | | | |dequeue |Core 2| 68 * sched_type, | | flow n | o-----------o event +------->| | 69 * event_type, | +-------------+ | | | port 2 | +------+ 70 * subev_type, | event queue 1 | | +--------+ 71 * event) +-----------------+ | +--------+ 72 * | | |dequeue +------+ 73 * +-------+ +-----------------+ | | event +------->|Core n| 74 * |Core | | +-------------+ o-----------o port n | | | 75 * |(SW) | | | flow 0 | | | +--------+ +--+---+ 76 * |event | | +-------------+ | | | 77 * +-------+ | +-------------+ | | | 78 * ^ | | flow 1 | | | | 79 * | | +-------------+ o------+ | 80 * | | +-------------+ | | 81 * | | | flow n | | | 82 * | | +-------------+ | | 83 * | | event queue n | | 84 * | +-----------------+ | 85 * | | 86 * 
+-----------------------------------------------------------+ 87 * 88 * **Event device**: A hardware or software-based event scheduler. 89 * 90 * **Event**: Represents an item of work and is the smallest unit of scheduling. 91 * An event carries metadata, such as queue ID, scheduling type, and event priority, 92 * and data such as one or more packets or other kinds of buffers. 93 * Some examples of events are: 94 * - a software-generated item of work originating from a lcore, 95 * perhaps carrying a packet to be processed. 96 * - a crypto work completion notification. 97 * - a timer expiry notification. 98 * 99 * **Event queue**: A queue containing events that are to be scheduled by the event device. 100 * An event queue contains events of different flows associated with scheduling 101 * types, such as atomic, ordered, or parallel. 102 * Each event given to an event device must have a valid event queue id field in the metadata, 103 * to specify on which event queue in the device the event must be placed, 104 * for later scheduling. 105 * 106 * **Event port**: An application's interface into the event dev for enqueue and 107 * dequeue operations. Each event port can be linked with one or more 108 * event queues for dequeue operations. 109 * Enqueue and dequeue from a port is not thread-safe, and the expected use-case is 110 * that each port is polled by only a single lcore. [If this is not the case, 111 * a suitable synchronization mechanism should be used to prevent simultaneous 112 * access from multiple lcores.] 113 * To schedule events to an lcore, the event device will schedule them to the event port(s) 114 * being polled by that lcore. 115 * 116 * *NOTE*: By default, all the functions of the Event Device API exported by a PMD 117 * are non-thread-safe functions, which must not be invoked on the same object in parallel on 118 * different logical cores. 119 * For instance, the dequeue function of a PMD cannot be invoked in parallel on two logical 120 * cores to operate on same event port. Of course, this function 121 * can be invoked in parallel by different logical cores on different ports. 122 * It is the responsibility of the upper level application to enforce this rule. 123 * 124 * In all functions of the Event API, the Event device is 125 * designated by an integer >= 0 named the device identifier *dev_id* 126 * 127 * The functions exported by the application Event API to setup a device 128 * must be invoked in the following order: 129 * - rte_event_dev_configure() 130 * - rte_event_queue_setup() 131 * - rte_event_port_setup() 132 * - rte_event_port_link() 133 * - rte_event_dev_start() 134 * 135 * Then, the application can invoke, in any order, the functions 136 * exported by the Event API to dequeue events, enqueue events, 137 * and link and unlink event queue(s) to event ports. 138 * 139 * Before configuring a device, an application should call rte_event_dev_info_get() 140 * to determine the capabilities of the event device, and any queue or port 141 * limits of that device. The parameters set in the various device configuration 142 * structures may need to be adjusted based on the max values provided in the 143 * device information structure returned from the rte_event_dev_info_get() API. 144 * An application may use rte_event_queue_default_conf_get() or 145 * rte_event_port_default_conf_get() to get the default configuration 146 * to set up an event queue or event port by overriding few default values. 147 * 148 * If the application wants to change the configuration (i.e. 
call 149 * rte_event_dev_configure(), rte_event_queue_setup(), or 150 * rte_event_port_setup()), it must call rte_event_dev_stop() first to stop the 151 * device and then do the reconfiguration before calling rte_event_dev_start() 152 * again. The schedule, enqueue and dequeue functions should not be invoked 153 * when the device is stopped. 154 * 155 * Finally, an application can close an Event device by invoking the 156 * rte_event_dev_close() function. Once closed, a device cannot be 157 * reconfigured or restarted. 158 * 159 * Driver-Oriented Event API 160 * ------------------------- 161 * 162 * At the Event driver level, Event devices are represented by a generic 163 * data structure of type *rte_event_dev*. 164 * 165 * Event devices are dynamically registered during the PCI/SoC device probing 166 * phase performed at EAL initialization time. 167 * When an Event device is being probed, an *rte_event_dev* structure is allocated 168 * for it and the event_dev_init() function supplied by the Event driver 169 * is invoked to properly initialize the device. 170 * 171 * The role of the device init function is to reset the device hardware or 172 * to initialize the software event driver implementation. 173 * 174 * If the device init operation is successful, the device is assigned a device 175 * id (dev_id) for application use. 176 * Otherwise, the *rte_event_dev* structure is freed. 177 * 178 * Each function of the application Event API invokes a specific function 179 * of the PMD that controls the target device designated by its device 180 * identifier. 181 * 182 * For this purpose, all device-specific functions of an Event driver are 183 * supplied through a set of pointers contained in a generic structure of type 184 * *event_dev_ops*. 185 * The address of the *event_dev_ops* structure is stored in the *rte_event_dev* 186 * structure by the device init function of the Event driver, which is 187 * invoked during the PCI/SoC device probing phase, as explained earlier. 188 * 189 * In other words, each function of the Event API simply retrieves the 190 * *rte_event_dev* structure associated with the device identifier and 191 * performs an indirect invocation of the corresponding driver function 192 * supplied in the *event_dev_ops* structure of the *rte_event_dev* structure. 193 * 194 * For performance reasons, the addresses of the fast-path functions of the 195 * event driver are not contained in the *event_dev_ops* structure. 196 * Instead, they are directly stored at the beginning of the *rte_event_dev* 197 * structure to avoid an extra indirect memory access during their invocation. 198 * 199 * Event Enqueue, Dequeue and Scheduling 200 * ------------------------------------- 201 * 202 * RTE event device drivers do not use interrupts for enqueue or dequeue 203 * operation. Instead, Event drivers export Poll-Mode enqueue and dequeue 204 * functions to applications. 205 * 206 * The events are injected to event device through *enqueue* operation by 207 * event producers in the system. The typical event producers are ethdev 208 * subsystem for generating packet events, CPU(SW) for generating events based 209 * on different stages of application processing, cryptodev for generating 210 * crypto work completion notification etc 211 * 212 * The *dequeue* operation gets one or more events from the event ports. 213 * The application processes the events and sends them to a downstream event queue through 214 * rte_event_enqueue_burst(), if it is an intermediate stage of event processing. 
215 * On the final stage of processing, the application may use the Tx adapter API for maintaining 216 * the event ingress order while sending the packet/event on the wire via NIC Tx. 217 * 218 * The point at which events are scheduled to ports depends on the device. 219 * For hardware devices, scheduling occurs asynchronously without any software 220 * intervention. Software schedulers can either be distributed 221 * (each worker thread schedules events to its own port) or centralized 222 * (a dedicated thread schedules to all ports). Distributed software schedulers 223 * perform the scheduling inside the enqueue or dequeue functions, whereas centralized 224 * software schedulers need a dedicated service core for scheduling. 225 * The absence of the RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag 226 * indicates that the device is centralized and thus needs a dedicated scheduling 227 * thread (generally an RTE service that should be mapped to one or more service cores) 228 * that repeatedly calls the software specific scheduling function. 229 * 230 * An event driven worker thread has following typical workflow on fastpath: 231 * \code{.c} 232 * while (1) { 233 * rte_event_dequeue_burst(...); 234 * (event processing) 235 * rte_event_enqueue_burst(...); 236 * } 237 * \endcode 238 */ 239 240 #include <rte_compat.h> 241 #include <rte_common.h> 242 #include <rte_errno.h> 243 #include <rte_mbuf_pool_ops.h> 244 #include <rte_mempool.h> 245 246 #include "rte_eventdev_trace_fp.h" 247 248 struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */ 249 struct rte_event; 250 251 /* Event device capability bitmap flags */ 252 #define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0) 253 /**< Event scheduling prioritization is based on the priority and weight 254 * associated with each event queue. 255 * 256 * Events from a queue with highest priority 257 * are scheduled first. If the queues are of same priority, weight of the queues 258 * are considered to select a queue in a weighted round robin fashion. 259 * Subsequent dequeue calls from an event port could see events from the same 260 * event queue, if the queue is configured with an affinity count. Affinity 261 * count is the number of subsequent dequeue calls, in which an event port 262 * should use the same event queue if the queue is non-empty 263 * 264 * NOTE: A device may use both queue prioritization and event prioritization 265 * (@ref RTE_EVENT_DEV_CAP_EVENT_QOS capability) when making packet scheduling decisions. 266 * 267 * @see rte_event_queue_setup() 268 * @see rte_event_queue_attr_set() 269 */ 270 #define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1) 271 /**< Event scheduling prioritization is based on the priority associated with 272 * each event. 273 * 274 * Priority of each event is supplied in *rte_event* structure 275 * on each enqueue operation. 276 * If this capability is not set, the priority field of the event structure 277 * is ignored for each event. 278 * 279 * NOTE: A device may use both queue prioritization (@ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability) 280 * and event prioritization when making packet scheduling decisions. 281 282 * @see rte_event_enqueue_burst() 283 */ 284 #define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2) 285 /**< Event device operates in distributed scheduling mode. 286 * 287 * In distributed scheduling mode, event scheduling happens in HW or 288 * rte_event_dequeue_burst() / rte_event_enqueue_burst() or the combination of these two. 
289 * If the flag is not set, then the eventdev is centralized and thus needs a 290 * dedicated service core that acts as a scheduling thread. 291 * 292 * @see rte_event_dev_service_id_get() 293 */ 294 #define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3) 295 /**< Event device is capable of accepting enqueued events, of any type 296 * advertised as supported by the device, to all destination queues. 297 * 298 * When this capability is set, and the @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag is set 299 * in @ref rte_event_queue_conf.event_queue_cfg, the "schedule_type" field of the 300 * @ref rte_event_queue_conf structure is ignored when a queue is being configured. 301 * Instead, the "sched_type" field of each event enqueued is used to 302 * select the scheduling to be performed on that event. 303 * 304 * If this capability is not set, or the configuration flag is not set, 305 * the queue only supports events of the *RTE_SCHED_TYPE_* type specified 306 * in the @ref rte_event_queue_conf structure at the time of configuration. 307 * The behaviour when events of other scheduling types are sent to the queue is 308 * undefined. 309 * 310 * @see RTE_EVENT_QUEUE_CFG_ALL_TYPES 311 * @see RTE_SCHED_TYPE_ATOMIC 312 * @see RTE_SCHED_TYPE_ORDERED 313 * @see RTE_SCHED_TYPE_PARALLEL 314 * @see rte_event_queue_conf.event_queue_cfg 315 * @see rte_event_queue_conf.schedule_type 316 * @see rte_event_enqueue_burst() 317 */ 318 #define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4) 319 /**< Event device is capable of operating in burst mode for enqueue (forward, 320 * release) and dequeue operations. 321 * 322 * If this capability is not set, the application 323 * can still use rte_event_dequeue_burst() and rte_event_enqueue_burst(), but the 324 * PMD accepts or returns only one event at a time. 325 * 326 * @see rte_event_dequeue_burst() 327 * @see rte_event_enqueue_burst() 328 */ 329 #define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5) 330 /**< Event device ports support disabling the implicit release feature, in 331 * which the port will release all unreleased events in its dequeue operation. 332 * 333 * If this capability is set and the port is configured with implicit release 334 * disabled, the application is responsible for explicitly releasing events 335 * using either the @ref RTE_EVENT_OP_FORWARD or the @ref RTE_EVENT_OP_RELEASE event 336 * enqueue operations. 337 * 338 * @see rte_event_dequeue_burst() 339 * @see rte_event_enqueue_burst() 340 */ 341 342 #define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6) 343 /**< Event device is capable of operating in non-sequential mode. 344 * 345 * The path of an event need not be sequential. The application can change 346 * the path of an event at runtime, and events may be sent to queues in any order. 347 * 348 * If the flag is not set, then each event will follow a path from queue 0 349 * to queue 1, to queue 2, and so on. 350 * The eventdev will return an error when the application enqueues an event for a 351 * qid which is not the next in the sequence. 352 */ 353 354 #define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7) 355 /**< Event device is capable of reconfiguring the queue/port link at runtime. 356 * 357 * If the flag is not set, the eventdev queue/port link can only be 358 * configured during initialization, or by stopping the device and 359 * then later restarting it after reconfiguration.
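 *
 * When the capability is present, links can be changed while the device is
 * running. A minimal sketch, assuming a port that should switch from one queue
 * to another (the dev_id, port_id and queue ids are illustrative assumptions):
 * \code{.c}
 * uint8_t old_queue = 0, new_queue = 1;
 *
 * rte_event_port_unlink(dev_id, port_id, &old_queue, 1);
 * rte_event_port_link(dev_id, port_id, &new_queue, NULL, 1);
 * \endcode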
360 * 361 * @see rte_event_port_link() 362 * @see rte_event_port_unlink() 363 */ 364 365 #define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8) 366 /**< Event device is capable of setting up links between multiple queues and a single port. 367 * 368 * If the flag is not set, each port may only be linked to a single queue, and 369 * so can only receive events from that queue. 370 * However, each queue may be linked to multiple ports. 371 * 372 * @see rte_event_port_link() 373 */ 374 375 #define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9) 376 /**< Event device preserves the flow ID from the enqueued event to the dequeued event. 377 * 378 * If this flag is not set, 379 * the content of the flow-id field in dequeued events is implementation dependent. 380 * 381 * @see rte_event_dequeue_burst() 382 */ 383 384 #define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10) 385 /**< Event device *does not* require calls to rte_event_maintain(). 386 * 387 * An event device that does not set this flag requires calls to 388 * rte_event_maintain() during periods when neither 389 * rte_event_dequeue_burst() nor rte_event_enqueue_burst() are called 390 * on a port. This will allow the event device to perform internal 391 * processing, such as flushing buffered events, return credits to a 392 * global pool, or process signaling related to load balancing. 393 * 394 * @see rte_event_maintain() 395 */ 396 397 #define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11) 398 /**< Event device is capable of changing the queue attributes at runtime i.e 399 * after rte_event_queue_setup() or rte_event_dev_start() call sequence. 400 * 401 * If this flag is not set, event queue attributes can only be configured during 402 * rte_event_queue_setup(). 403 * 404 * @see rte_event_queue_setup() 405 */ 406 407 #define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12) 408 /**< Event device is capable of supporting multiple link profiles per event port. 409 * 410 * When set, the value of `rte_event_dev_info::max_profiles_per_port` is greater 411 * than one, and multiple profiles may be configured and then switched at runtime. 412 * If not set, only a single profile may be configured, which may itself be 413 * runtime adjustable (if @ref RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK is set). 414 * 415 * @see rte_event_port_profile_links_set() 416 * @see rte_event_port_profile_links_get() 417 * @see rte_event_port_profile_switch() 418 * @see RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK 419 */ 420 421 #define RTE_EVENT_DEV_CAP_ATOMIC (1ULL << 13) 422 /**< Event device is capable of atomic scheduling. 423 * When this flag is set, the application can configure queues with scheduling type 424 * atomic on this event device. 425 * 426 * @see RTE_SCHED_TYPE_ATOMIC 427 */ 428 429 #define RTE_EVENT_DEV_CAP_ORDERED (1ULL << 14) 430 /**< Event device is capable of ordered scheduling. 431 * When this flag is set, the application can configure queues with scheduling type 432 * ordered on this event device. 433 * 434 * @see RTE_SCHED_TYPE_ORDERED 435 */ 436 437 #define RTE_EVENT_DEV_CAP_PARALLEL (1ULL << 15) 438 /**< Event device is capable of parallel scheduling. 439 * When this flag is set, the application can configure queues with scheduling type 440 * parallel on this event device. 441 * 442 * @see RTE_SCHED_TYPE_PARALLEL 443 */ 444 445 #define RTE_EVENT_DEV_CAP_INDEPENDENT_ENQ (1ULL << 16) 446 /**< Event device is capable of independent enqueue. 
447 * This capability indicates that the event device supports enqueue in any order, 448 * or specifically in a different order than the 449 * dequeue order. The eventdev PMD can either dequeue events in the changed order in which 450 * they are enqueued, or restore the original order before sending them to the 451 * underlying hardware device. A flag is provided during port configuration to 452 * inform the eventdev PMD that the application intends to use an independent enqueue 453 * order on a particular port. Note that this capability only matters for eventdevs 454 * supporting burst mode. 455 * 456 * When implicit release is enabled on a port, the eventdev PMD will also handle 457 * the insertion of RELEASE events in place of dropped events. The independent enqueue 458 * feature only applies to FORWARD and RELEASE events. New events (op=RTE_EVENT_OP_NEW) 459 * will be dequeued in the order the application enqueues them and do not maintain 460 * any order relative to FORWARD/RELEASE events. The FORWARD vs. NEW relaxed ordering 461 * only applies to ports that have enabled the independent enqueue feature. 462 */ 463 464 #define RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE (1ULL << 17) 465 /**< Event device supports event pre-scheduling. 466 * 467 * When this capability is available, the application can enable event pre-scheduling on the event 468 * device to pre-schedule events to an event port when `rte_event_dequeue_burst()` 469 * is issued. 470 * The pre-schedule process starts with the `rte_event_dequeue_burst()` call and the 471 * pre-scheduled events are returned on the next `rte_event_dequeue_burst()` call. 472 * 473 * @see rte_event_dev_configure() 474 */ 475 476 #define RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE (1ULL << 18) 477 /**< Event device supports adaptive event pre-scheduling. 478 * 479 * When this capability is available, the application can enable adaptive pre-scheduling 480 * on the event device where the events are pre-scheduled when there are no forward 481 * progress constraints with the currently held flow contexts. 482 * The pre-schedule process starts with the `rte_event_dequeue_burst()` call and the 483 * pre-scheduled events are returned on the next `rte_event_dequeue_burst()` call. 484 * 485 * @see rte_event_dev_configure() 486 */ 487 488 #define RTE_EVENT_DEV_CAP_PER_PORT_PRESCHEDULE (1ULL << 19) 489 /**< Event device supports event pre-scheduling per event port. 490 * 491 * When this flag is set, the event device allows controlling the event 492 * pre-scheduling at an event port granularity. 493 * 494 * @see rte_event_dev_configure() 495 * @see rte_event_port_preschedule_modify() 496 */ 497 498 #define RTE_EVENT_DEV_CAP_PRESCHEDULE_EXPLICIT (1ULL << 20) 499 /**< Event device supports explicit pre-scheduling. 500 * 501 * When this flag is set, the application can issue a pre-schedule request on 502 * an event port. 503 * 504 * @see rte_event_port_preschedule() 505 */ 506 507 /* Event device priority levels */ 508 #define RTE_EVENT_DEV_PRIORITY_HIGHEST 0 509 /**< Highest priority level for events and queues. 510 * 511 * @see rte_event_queue_setup() 512 * @see rte_event_enqueue_burst() 513 * @see rte_event_port_link() 514 */ 515 #define RTE_EVENT_DEV_PRIORITY_NORMAL 128 516 /**< Normal priority level for events and queues. 517 * 518 * @see rte_event_queue_setup() 519 * @see rte_event_enqueue_burst() 520 * @see rte_event_port_link() 521 */ 522 #define RTE_EVENT_DEV_PRIORITY_LOWEST 255 523 /**< Lowest priority level for events and queues.
524 * 525 * @see rte_event_queue_setup() 526 * @see rte_event_enqueue_burst() 527 * @see rte_event_port_link() 528 */ 529 530 /* Event queue scheduling weights */ 531 #define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255 532 /**< Highest weight of an event queue. 533 * 534 * @see rte_event_queue_attr_get() 535 * @see rte_event_queue_attr_set() 536 */ 537 #define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0 538 /**< Lowest weight of an event queue. 539 * 540 * @see rte_event_queue_attr_get() 541 * @see rte_event_queue_attr_set() 542 */ 543 544 /* Event queue scheduling affinity */ 545 #define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255 546 /**< Highest scheduling affinity of an event queue. 547 * 548 * @see rte_event_queue_attr_get() 549 * @see rte_event_queue_attr_set() 550 */ 551 #define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0 552 /**< Lowest scheduling affinity of an event queue. 553 * 554 * @see rte_event_queue_attr_get() 555 * @see rte_event_queue_attr_set() 556 */ 557 558 /** 559 * Get the total number of event devices. 560 * 561 * @return 562 * The total number of usable event devices. 563 */ 564 uint8_t 565 rte_event_dev_count(void); 566 567 /** 568 * Get the device identifier for the named event device. 569 * 570 * @param name 571 * Event device name to select the event device identifier. 572 * 573 * @return 574 * Event device identifier (dev_id >= 0) on success. 575 * Negative error code on failure: 576 * - -EINVAL - input name parameter is invalid. 577 * - -ENODEV - no event device found with that name. 578 */ 579 int 580 rte_event_dev_get_dev_id(const char *name); 581 582 /** 583 * Return the NUMA socket to which a device is connected. 584 * 585 * @param dev_id 586 * The identifier of the device. 587 * @return 588 * The NUMA socket id to which the device is connected or 589 * a default of zero if the socket could not be determined. 590 * -EINVAL on error, where the given dev_id value does not 591 * correspond to any event device. 592 */ 593 int 594 rte_event_dev_socket_id(uint8_t dev_id); 595 596 /** 597 * Event device information 598 */ 599 struct rte_event_dev_info { 600 const char *driver_name; /**< Event driver name. */ 601 struct rte_device *dev; /**< Device information. */ 602 uint32_t min_dequeue_timeout_ns; 603 /**< Minimum global dequeue timeout(ns) supported by this device. */ 604 uint32_t max_dequeue_timeout_ns; 605 /**< Maximum global dequeue timeout(ns) supported by this device. */ 606 uint32_t dequeue_timeout_ns; 607 /**< Configured global dequeue timeout(ns) for this device. */ 608 uint8_t max_event_queues; 609 /**< Maximum event queues supported by this device. 610 * 611 * This count excludes any queues covered by @ref max_single_link_event_port_queue_pairs. 612 */ 613 uint32_t max_event_queue_flows; 614 /**< Maximum number of flows within an event queue supported by this device. */ 615 uint8_t max_event_queue_priority_levels; 616 /**< Maximum number of event queue priority levels supported by this device. 617 * 618 * Valid when the device has @ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability. 619 * 620 * The implementation shall normalize priority values specified between 621 * @ref RTE_EVENT_DEV_PRIORITY_HIGHEST and @ref RTE_EVENT_DEV_PRIORITY_LOWEST 622 * to map them internally to this range of priorities. 623 * [For devices supporting a power-of-2 number of priority levels, this 624 * normalization will be done via a right-shift operation, so only the top 625 * log2(max_levels) bits will be used by the event device.] 
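 * For example, assuming a device that reports 4 queue priority levels (an
 * illustrative value, not a property of any particular device), a requested
 * priority in [0, 255] would be right-shifted by 6, so
 * @ref RTE_EVENT_DEV_PRIORITY_NORMAL (128) maps to internal level 2.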
626 * 627 * @see rte_event_queue_conf.priority 628 */ 629 uint8_t max_event_priority_levels; 630 /**< Maximum number of event priority levels by this device. 631 * 632 * Valid when the device has @ref RTE_EVENT_DEV_CAP_EVENT_QOS capability. 633 * 634 * The implementation shall normalize priority values specified between 635 * @ref RTE_EVENT_DEV_PRIORITY_HIGHEST and @ref RTE_EVENT_DEV_PRIORITY_LOWEST 636 * to map them internally to this range of priorities. 637 * [For devices supporting a power-of-2 number of priority levels, this 638 * normalization will be done via a right-shift operation, so only the top 639 * log2(max_levels) bits will be used by the event device.] 640 * 641 * @see rte_event.priority 642 */ 643 uint8_t max_event_ports; 644 /**< Maximum number of event ports supported by this device. 645 * 646 * This count excludes any ports covered by @ref max_single_link_event_port_queue_pairs. 647 */ 648 uint8_t max_event_port_dequeue_depth; 649 /**< Maximum number of events that can be dequeued at a time from an event port 650 * on this device. 651 * 652 * A device that does not support burst dequeue 653 * (@ref RTE_EVENT_DEV_CAP_BURST_MODE) will set this to 1. 654 */ 655 uint32_t max_event_port_enqueue_depth; 656 /**< Maximum number of events that can be enqueued at a time to an event port 657 * on this device. 658 * 659 * A device that does not support burst enqueue 660 * (@ref RTE_EVENT_DEV_CAP_BURST_MODE) will set this to 1. 661 */ 662 uint8_t max_event_port_links; 663 /**< Maximum number of queues that can be linked to a single event port on this device. 664 */ 665 int32_t max_num_events; 666 /**< A *closed system* event dev has a limit on the number of events it 667 * can manage at a time. 668 * Once the number of events tracked by an eventdev exceeds this number, 669 * any enqueues of NEW events will fail. 670 * An *open system* event dev does not have a limit and will specify this as -1. 671 */ 672 uint32_t event_dev_cap; 673 /**< Event device capabilities flags (RTE_EVENT_DEV_CAP_*). */ 674 uint8_t max_single_link_event_port_queue_pairs; 675 /**< Maximum number of event ports and queues, supported by this device, 676 * that are optimized for (and only capable of) single-link configurations. 677 * These ports and queues are not accounted for in @ref max_event_ports 678 * or @ref max_event_queues. 679 */ 680 uint8_t max_profiles_per_port; 681 /**< Maximum number of event queue link profiles per event port. 682 * A device that doesn't support multiple profiles will set this as 1. 683 */ 684 }; 685 686 /** 687 * Retrieve details of an event device's capabilities and configuration limits. 688 * 689 * @param dev_id 690 * The identifier of the device. 691 * 692 * @param[out] dev_info 693 * A pointer to a structure of type *rte_event_dev_info* to be filled with the 694 * information about the device's capabilities. 695 * 696 * @return 697 * - 0: Success, information about the event device is present in dev_info. 698 * - <0: Failure, error code returned by the function. 699 * - -EINVAL - invalid input parameters, e.g. incorrect device id. 700 * - -ENOTSUP - device does not support returning capabilities information. 701 */ 702 int 703 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info); 704 705 /** 706 * The count of ports. 707 */ 708 #define RTE_EVENT_DEV_ATTR_PORT_COUNT 0 709 /** 710 * The count of queues. 711 */ 712 #define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1 713 /** 714 * The status of the device, zero for stopped, non-zero for started. 
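 *
 * For example, the started state can be read as follows (a minimal sketch,
 * with error handling omitted):
 * \code{.c}
 * uint32_t started;
 *
 * rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_STARTED, &started);
 * \endcode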
715 */ 716 #define RTE_EVENT_DEV_ATTR_STARTED 2 717 718 /** 719 * Get an attribute from a device. 720 * 721 * @param dev_id Eventdev id 722 * @param attr_id The attribute ID to retrieve 723 * @param[out] attr_value A pointer that will be filled in with the attribute 724 * value if successful. 725 * 726 * @return 727 * - 0: Successfully retrieved attribute value 728 * - -EINVAL: Invalid device or *attr_id* provided, or *attr_value* is NULL 729 */ 730 int 731 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id, 732 uint32_t *attr_value); 733 734 735 /* Event device configuration bitmap flags */ 736 #define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0) 737 /**< Override the global *dequeue_timeout_ns* and use per dequeue timeout in ns. 738 * @see rte_event_dequeue_timeout_ticks(), rte_event_dequeue_burst() 739 */ 740 741 /** Event device pre-schedule type enumeration. */ 742 enum rte_event_dev_preschedule_type { 743 RTE_EVENT_PRESCHEDULE_NONE, 744 /**< Disable pre-schedule across the event device or on a given event port. 745 * @ref rte_event_dev_config.preschedule_type 746 * @ref rte_event_port_preschedule_modify() 747 */ 748 RTE_EVENT_PRESCHEDULE, 749 /**< Enable pre-schedule always across the event device or a given event port. 750 * @ref rte_event_dev_config.preschedule_type 751 * @ref rte_event_port_preschedule_modify() 752 * @see RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE 753 * @see RTE_EVENT_DEV_CAP_PER_PORT_PRESCHEDULE 754 */ 755 RTE_EVENT_PRESCHEDULE_ADAPTIVE, 756 /**< Enable adaptive pre-schedule across the event device or a given event port. 757 * Delay issuing pre-schedule until there are no forward progress constraints with 758 * the held flow contexts. 759 * @ref rte_event_dev_config.preschedule_type 760 * @ref rte_event_port_preschedule_modify() 761 * @see RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE 762 * @see RTE_EVENT_DEV_CAP_PER_PORT_PRESCHEDULE 763 */ 764 }; 765 766 /** Event device configuration structure */ 767 struct rte_event_dev_config { 768 uint32_t dequeue_timeout_ns; 769 /**< rte_event_dequeue_burst() timeout on this device. 770 * This value should be in the range of @ref rte_event_dev_info.min_dequeue_timeout_ns and 771 * @ref rte_event_dev_info.max_dequeue_timeout_ns returned by 772 * @ref rte_event_dev_info_get() 773 * The value 0 is allowed, in which case, default dequeue timeout used. 774 * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT 775 */ 776 int32_t nb_events_limit; 777 /**< In a *closed system* this field is the limit on maximum number of 778 * events that can be inflight in the eventdev at a given time. The 779 * limit is required to ensure that the finite space in a closed system 780 * is not exhausted. 781 * The value cannot exceed @ref rte_event_dev_info.max_num_events 782 * returned by rte_event_dev_info_get(). 783 * 784 * This value should be set to -1 for *open systems*, that is, 785 * those systems returning -1 in @ref rte_event_dev_info.max_num_events. 786 * 787 * @see rte_event_port_conf.new_event_threshold 788 */ 789 uint8_t nb_event_queues; 790 /**< Number of event queues to configure on this device. 791 * This value *includes* any single-link queue-port pairs to be used. 792 * This value cannot exceed @ref rte_event_dev_info.max_event_queues + 793 * @ref rte_event_dev_info.max_single_link_event_port_queue_pairs 794 * returned by rte_event_dev_info_get(). 795 * The number of non-single-link queues i.e. 
this value less 796 * *nb_single_link_event_port_queues* in this struct, cannot exceed 797 * @ref rte_event_dev_info.max_event_queues 798 */ 799 uint8_t nb_event_ports; 800 /**< Number of event ports to configure on this device. 801 * This value *includes* any single-link queue-port pairs to be used. 802 * This value cannot exceed @ref rte_event_dev_info.max_event_ports + 803 * @ref rte_event_dev_info.max_single_link_event_port_queue_pairs 804 * returned by rte_event_dev_info_get(). 805 * The number of non-single-link ports i.e. this value less 806 * *nb_single_link_event_port_queues* in this struct, cannot exceed 807 * @ref rte_event_dev_info.max_event_ports 808 */ 809 uint32_t nb_event_queue_flows; 810 /**< Max number of flows needed for a single event queue on this device. 811 * This value cannot exceed @ref rte_event_dev_info.max_event_queue_flows 812 * returned by rte_event_dev_info_get() 813 */ 814 uint32_t nb_event_port_dequeue_depth; 815 /**< Max number of events that can be dequeued at a time from an event port on this device. 816 * This value cannot exceed @ref rte_event_dev_info.max_event_port_dequeue_depth 817 * returned by rte_event_dev_info_get(). 818 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable. 819 * @see rte_event_port_setup() rte_event_dequeue_burst() 820 */ 821 uint32_t nb_event_port_enqueue_depth; 822 /**< Maximum number of events can be enqueued at a time to an event port on this device. 823 * This value cannot exceed @ref rte_event_dev_info.max_event_port_enqueue_depth 824 * returned by rte_event_dev_info_get(). 825 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable. 826 * @see rte_event_port_setup() rte_event_enqueue_burst() 827 */ 828 uint32_t event_dev_cfg; 829 /**< Event device config flags(RTE_EVENT_DEV_CFG_)*/ 830 uint8_t nb_single_link_event_port_queues; 831 /**< Number of event ports and queues that will be singly-linked to 832 * each other. These are a subset of the overall event ports and 833 * queues; this value cannot exceed *nb_event_ports* or 834 * *nb_event_queues*. If the device has ports and queues that are 835 * optimized for single-link usage, this field is a hint for how many 836 * to allocate; otherwise, regular event ports and queues will be used. 837 */ 838 enum rte_event_dev_preschedule_type preschedule_type; 839 /**< Event pre-schedule type to use across the event device, if supported. 840 * @see RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE 841 * @see RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE 842 */ 843 }; 844 845 /** 846 * Configure an event device. 847 * 848 * This function must be invoked before any other configuration function in the 849 * API, when preparing an event device for application use. 850 * This function can also be re-invoked when a device is in the stopped state. 851 * 852 * The caller should use rte_event_dev_info_get() to get the capabilities and 853 * resource limits for this event device before calling this API. 854 * Many values in the dev_conf input parameter are subject to limits given 855 * in the device information returned from rte_event_dev_info_get(). 856 * 857 * @param dev_id 858 * The identifier of the device to configure. 859 * @param dev_conf 860 * The event device configuration structure. 861 * 862 * @return 863 * - 0: Success, device configured. 864 * - <0: Error code returned by the driver configuration function. 865 * - -ENOTSUP - device does not support configuration. 866 * - -EINVAL - invalid input parameter. 867 * - -EBUSY - device has already been started. 
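 *
 * A minimal configuration sketch, sizing the device from the limits reported
 * by rte_event_dev_info_get(). The queue and port counts used here are
 * illustrative assumptions, and error handling is reduced to a bare return:
 * \code{.c}
 * struct rte_event_dev_info info;
 * struct rte_event_dev_config cfg = {0};
 *
 * rte_event_dev_info_get(dev_id, &info);
 * cfg.nb_event_queues = 2;
 * cfg.nb_event_ports = 2;
 * cfg.nb_events_limit = info.max_num_events;
 * cfg.nb_event_queue_flows = info.max_event_queue_flows;
 * cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 * cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 * cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 * if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *         return -1;
 * \endcode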
868 */ 869 int 870 rte_event_dev_configure(uint8_t dev_id, 871 const struct rte_event_dev_config *dev_conf); 872 873 /* Event queue specific APIs */ 874 875 /* Event queue configuration bitmap flags */ 876 #define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0) 877 /**< Allow events with schedule types ATOMIC, ORDERED, and PARALLEL to be enqueued to this queue. 878 * 879 * The scheduling type to be used is that specified in each individual event. 880 * This flag can only be set when configuring queues on devices reporting the 881 * @ref RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES capability. 882 * 883 * Without this flag, only events with the specific scheduling type configured at queue setup 884 * can be sent to the queue. 885 * 886 * @see RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES 887 * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL 888 * @see rte_event_enqueue_burst() 889 */ 890 #define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1) 891 /**< This event queue links only to a single event port. 892 * 893 * No load-balancing of events is performed, as all events 894 * sent to this queue end up at the same event port. 895 * The number of queues on which this flag is to be set must be 896 * configured at device configuration time, by setting 897 * @ref rte_event_dev_config.nb_single_link_event_port_queues 898 * parameter appropriately. 899 * 900 * This flag serves as a hint only, any devices without specific 901 * support for single-link queues can fall-back automatically to 902 * using regular queues with a single destination port. 903 * 904 * @see rte_event_dev_info.max_single_link_event_port_queue_pairs 905 * @see rte_event_dev_config.nb_single_link_event_port_queues 906 * @see rte_event_port_setup(), rte_event_port_link() 907 */ 908 909 /** Event queue configuration structure */ 910 struct rte_event_queue_conf { 911 uint32_t nb_atomic_flows; 912 /**< The maximum number of active flows this queue can track at any 913 * given time. 914 * 915 * If the queue is configured for atomic scheduling (by 916 * applying the @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to 917 * @ref rte_event_queue_conf.event_queue_cfg 918 * or @ref RTE_SCHED_TYPE_ATOMIC flag to @ref rte_event_queue_conf.schedule_type), then the 919 * value must be in the range of [1, @ref rte_event_dev_config.nb_event_queue_flows], 920 * which was previously provided in rte_event_dev_configure(). 921 * 922 * If the queue is not configured for atomic scheduling this value is ignored. 923 */ 924 uint32_t nb_atomic_order_sequences; 925 /**< The maximum number of outstanding events waiting to be 926 * reordered by this queue. In other words, the number of entries in 927 * this queue’s reorder buffer. When the number of events in the 928 * reorder buffer reaches to *nb_atomic_order_sequences* then the 929 * scheduler cannot schedule the events from this queue and no 930 * events will be returned from dequeue until one or more entries are 931 * freed up/released. 932 * 933 * If the queue is configured for ordered scheduling (by applying the 934 * @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to @ref rte_event_queue_conf.event_queue_cfg or 935 * @ref RTE_SCHED_TYPE_ORDERED flag to @ref rte_event_queue_conf.schedule_type), 936 * then the value must be in the range of 937 * [1, @ref rte_event_dev_config.nb_event_queue_flows], which was 938 * previously supplied to rte_event_dev_configure(). 939 * 940 * If the queue is not configured for ordered scheduling, then this value is ignored. 
941 */ 942 uint32_t event_queue_cfg; 943 /**< Queue cfg flags (RTE_EVENT_QUEUE_CFG_*). */ 944 uint8_t schedule_type; 945 /**< Queue schedule type (RTE_SCHED_TYPE_*). 946 * 947 * Valid when the @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag is not set in 948 * @ref rte_event_queue_conf.event_queue_cfg. 949 * 950 * If the @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag is set, then this field is ignored. 951 * 952 * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL 953 */ 954 uint8_t priority; 955 /**< Priority for this event queue relative to other event queues. 956 * 957 * The requested priority should be in the range of 958 * [@ref RTE_EVENT_DEV_PRIORITY_HIGHEST, @ref RTE_EVENT_DEV_PRIORITY_LOWEST]. 959 * The implementation shall normalize the requested priority to a 960 * priority value supported by the event device. 961 * 962 * Valid when the device has @ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability, 963 * ignored otherwise. 964 */ 965 uint8_t weight; 966 /**< Weight of the event queue relative to other event queues. 967 * 968 * The requested weight should be in the range of 969 * [@ref RTE_EVENT_QUEUE_WEIGHT_HIGHEST, @ref RTE_EVENT_QUEUE_WEIGHT_LOWEST]. 970 * The implementation shall normalize the requested weight to a 971 * weight value supported by the event device. 972 * 973 * Valid when the device has @ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability, 974 * ignored otherwise. 975 */ 976 uint8_t affinity; 977 /**< Affinity of the event queue relative to other event queues. 978 * 979 * The requested affinity should be in the range of 980 * [@ref RTE_EVENT_QUEUE_AFFINITY_HIGHEST, @ref RTE_EVENT_QUEUE_AFFINITY_LOWEST]. 981 * The implementation shall normalize the requested affinity to an 982 * affinity value supported by the event device. 983 * 984 * Valid when the device has @ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability, 985 * ignored otherwise. 986 */ 987 }; 988 989 /** 990 * Retrieve the default configuration information of an event queue designated 991 * by its *queue_id* from the event driver for an event device. 992 * 993 * This function is intended to be used in conjunction with rte_event_queue_setup(), 994 * where the caller needs to set up the queue by overriding a few default values. 995 * 996 * @param dev_id 997 * The identifier of the device. 998 * @param queue_id 999 * The index of the event queue to get the configuration information. 1000 * The value must be less than @ref rte_event_dev_config.nb_event_queues 1001 * previously supplied to rte_event_dev_configure(). 1002 * @param[out] queue_conf 1003 * The pointer to the default event queue configuration data. 1004 * @return 1005 * - 0: Success, driver updates the default event queue configuration data. 1006 * - <0: Error code returned by the driver info get function. 1007 * 1008 * @see rte_event_queue_setup() 1009 */ 1010 int 1011 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id, 1012 struct rte_event_queue_conf *queue_conf); 1013 1014 /** 1015 * Allocate and set up an event queue for an event device. 1016 * 1017 * @param dev_id 1018 * The identifier of the device. 1019 * @param queue_id 1020 * The index of the event queue to set up. The value must be 1021 * less than @ref rte_event_dev_config.nb_event_queues previously supplied to 1022 * rte_event_dev_configure(). 1023 * @param queue_conf 1024 * The pointer to the configuration data to be used for the event queue. 1025 * A NULL value is allowed, in which case the default configuration is used.
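 *
 * A typical pattern is to start from the driver defaults and override only a
 * few fields. A sketch, assuming an atomic queue is wanted (the chosen values
 * are illustrative):
 * \code{.c}
 * struct rte_event_queue_conf conf;
 *
 * rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
 * conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 * conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
 * rte_event_queue_setup(dev_id, queue_id, &conf);
 * \endcode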
1026 * 1027 * @see rte_event_queue_default_conf_get() 1028 * 1029 * @return 1030 * - 0: Success, event queue correctly set up. 1031 * - <0: event queue configuration failed. 1032 */ 1033 int 1034 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id, 1035 const struct rte_event_queue_conf *queue_conf); 1036 1037 /** 1038 * Queue attribute id for the priority of the queue. 1039 */ 1040 #define RTE_EVENT_QUEUE_ATTR_PRIORITY 0 1041 /** 1042 * Queue attribute id for the number of atomic flows configured for the queue. 1043 */ 1044 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1 1045 /** 1046 * Queue attribute id for the number of atomic order sequences configured for the queue. 1047 */ 1048 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2 1049 /** 1050 * Queue attribute id for the configuration flags for the queue. 1051 */ 1052 #define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3 1053 /** 1054 * Queue attribute id for the schedule type of the queue. 1055 */ 1056 #define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4 1057 /** 1058 * Queue attribute id for the weight of the queue. 1059 */ 1060 #define RTE_EVENT_QUEUE_ATTR_WEIGHT 5 1061 /** 1062 * Queue attribute id for the affinity of the queue. 1063 */ 1064 #define RTE_EVENT_QUEUE_ATTR_AFFINITY 6 1065 1066 /** 1067 * Get an attribute of an event queue. 1068 * 1069 * @param dev_id 1070 * The identifier of the device. 1071 * @param queue_id 1072 * The index of the event queue to query. The value must be less than 1073 * @ref rte_event_dev_config.nb_event_queues previously supplied to rte_event_dev_configure(). 1074 * @param attr_id 1075 * The attribute ID to retrieve (RTE_EVENT_QUEUE_ATTR_*). 1076 * @param[out] attr_value 1077 * A pointer that will be filled in with the attribute value if successful. 1078 * 1079 * @return 1080 * - 0: Successfully returned value 1081 * - -EINVAL: invalid device, queue or attr_id provided, or attr_value was NULL. 1082 * - -EOVERFLOW: returned when attr_id is set to 1083 * @ref RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES is 1084 * set in the queue configuration flags. 1085 */ 1086 int 1087 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id, 1088 uint32_t *attr_value); 1089 1090 /** 1091 * Set an event queue attribute. 1092 * 1093 * @param dev_id 1094 * The identifier of the device. 1095 * @param queue_id 1096 * The index of the event queue to configure. The value must be less than 1097 * @ref rte_event_dev_config.nb_event_queues previously supplied to rte_event_dev_configure(). 1098 * @param attr_id 1099 * The attribute ID to set (RTE_EVENT_QUEUE_ATTR_*). 1100 * @param attr_value 1101 * The attribute value to set. 1102 * 1103 * @return 1104 * - 0: Successfully set attribute. 1105 * - <0: failed to set event queue attribute. 1106 * - -EINVAL: invalid device, queue or attr_id. 1107 * - -ENOTSUP: device does not support setting the event attribute. 1108 */ 1109 int 1110 rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id, 1111 uint64_t attr_value); 1112 1113 /* Event port specific APIs */ 1114 1115 /* Event port configuration bitmap flags */ 1116 #define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL (1ULL << 0) 1117 /**< Configure the port not to release outstanding events in 1118 * rte_event_dev_dequeue_burst(). If set, all events received through 1119 * the port must be explicitly released with RTE_EVENT_OP_RELEASE or 1120 * RTE_EVENT_OP_FORWARD. Must be unset if the device is not 1121 * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable. 
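 *
 * With implicit release disabled, a worker that drops an event must hand its
 * scheduling context back explicitly. A sketch, where drop_event() stands for
 * a hypothetical application-level decision:
 * \code{.c}
 * struct rte_event ev;
 *
 * if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0) == 1) {
 *         if (drop_event(&ev)) {
 *                 ev.op = RTE_EVENT_OP_RELEASE;
 *                 rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 *         }
 * }
 * \endcode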
1122 */ 1123 #define RTE_EVENT_PORT_CFG_SINGLE_LINK (1ULL << 1) 1124 /**< This event port links only to a single event queue. 1125 * The queue it links with should be similarly configured with the 1126 * @ref RTE_EVENT_QUEUE_CFG_SINGLE_LINK flag. 1127 * 1128 * @see RTE_EVENT_QUEUE_CFG_SINGLE_LINK 1129 * @see rte_event_port_setup(), rte_event_port_link() 1130 */ 1131 #define RTE_EVENT_PORT_CFG_HINT_PRODUCER (1ULL << 2) 1132 /**< Hint that this event port will primarily enqueue events to the system. 1133 * A PMD can optimize its internal workings by assuming that this port is 1134 * primarily going to enqueue NEW events. 1135 * 1136 * Note that this flag is only a hint, so PMDs must operate under the 1137 * assumption that any port can enqueue an event with any type of op. 1138 * 1139 * @see rte_event_port_setup() 1140 */ 1141 #define RTE_EVENT_PORT_CFG_HINT_CONSUMER (1ULL << 3) 1142 /**< Hint that this event port will primarily dequeue events from the system. 1143 * A PMD can optimize its internal workings by assuming that this port is 1144 * primarily going to consume events, and not enqueue NEW or FORWARD 1145 * events. 1146 * 1147 * Note that this flag is only a hint, so PMDs must operate under the 1148 * assumption that any port can enqueue an event with any type of op. 1149 * 1150 * @see rte_event_port_setup() 1151 */ 1152 #define RTE_EVENT_PORT_CFG_HINT_WORKER (1ULL << 4) 1153 /**< Hint that this event port will primarily pass existing events through. 1154 * A PMD can optimize its internal workings by assuming that this port is 1155 * primarily going to FORWARD events, and not enqueue NEW or RELEASE events 1156 * often. 1157 * 1158 * Note that this flag is only a hint, so PMDs must operate under the 1159 * assumption that any port can enqueue an event with any type of op. 1160 * 1161 * @see rte_event_port_setup() 1162 */ 1163 #define RTE_EVENT_PORT_CFG_INDEPENDENT_ENQ (1ULL << 5) 1164 /**< Flag to enable independent enqueue. Must not be set if the device 1165 * is not RTE_EVENT_DEV_CAP_INDEPENDENT_ENQ capable. This feature 1166 * allows an application to enqueue RTE_EVENT_OP_FORWARD or 1167 * RTE_EVENT_OP_RELEASE in an order different than the order the 1168 * events were dequeued from the event device, while maintaining 1169 * RTE_SCHED_TYPE_ATOMIC or RTE_SCHED_TYPE_ORDERED semantics. 1170 * 1171 * Note that this flag only matters for Eventdevs supporting burst mode. 1172 * 1173 * @see rte_event_port_setup() 1174 */ 1175 1176 /** Event port configuration structure */ 1177 struct rte_event_port_conf { 1178 int32_t new_event_threshold; 1179 /**< A backpressure threshold for new event enqueues on this port. 1180 * Use for *closed system* event dev where event capacity is limited, 1181 * and cannot exceed the capacity of the event dev. 1182 * 1183 * Configuring ports with different thresholds can make higher priority 1184 * traffic less likely to be backpressured. 1185 * For example, a port used to inject NIC Rx packets into the event dev 1186 * can have a lower threshold so as not to overwhelm the device, 1187 * while ports used for worker pools can have a higher threshold. 1188 * This value cannot exceed the @ref rte_event_dev_config.nb_events_limit value 1189 * which was previously supplied to rte_event_dev_configure(). 1190 * 1191 * This should be set to '-1' for *open system*, i.e when 1192 * @ref rte_event_dev_info.max_num_events == -1. 1193 */ 1194 uint16_t dequeue_depth; 1195 /**< Configure the maximum size of burst dequeues for this event port. 
1196 * This value cannot exceed the @ref rte_event_dev_config.nb_event_port_dequeue_depth value 1197 * which was previously supplied to rte_event_dev_configure(). 1198 * 1199 * Ignored when device does not support the @ref RTE_EVENT_DEV_CAP_BURST_MODE capability. 1200 */ 1201 uint16_t enqueue_depth; 1202 /**< Configure the maximum size of burst enqueues to this event port. 1203 * This value cannot exceed the @ref rte_event_dev_config.nb_event_port_enqueue_depth value 1204 * which was previously supplied to rte_event_dev_configure(). 1205 * 1206 * Ignored when device does not support the @ref RTE_EVENT_DEV_CAP_BURST_MODE capability. 1207 */ 1208 uint32_t event_port_cfg; /**< Port configuration flags(EVENT_PORT_CFG_) */ 1209 }; 1210 1211 /** 1212 * Retrieve the default configuration information of an event port designated 1213 * by its *port_id* from the event driver for an event device. 1214 * 1215 * This function is intended to be used in conjunction with rte_event_port_setup() 1216 * where the caller can set up the port by just overriding few default values. 1217 * 1218 * @param dev_id 1219 * The identifier of the device. 1220 * @param port_id 1221 * The index of the event port to get the configuration information. 1222 * The value must be less than @ref rte_event_dev_config.nb_event_ports 1223 * previously supplied to rte_event_dev_configure(). 1224 * @param[out] port_conf 1225 * The pointer to a structure to store the default event port configuration data. 1226 * @return 1227 * - 0: Success, driver updates the default event port configuration data. 1228 * - <0: Error code returned by the driver info get function. 1229 * - -EINVAL - invalid input parameter. 1230 * - -ENOTSUP - function is not supported for this device. 1231 * 1232 * @see rte_event_port_setup() 1233 */ 1234 int 1235 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id, 1236 struct rte_event_port_conf *port_conf); 1237 1238 /** 1239 * Allocate and set up an event port for an event device. 1240 * 1241 * @param dev_id 1242 * The identifier of the device. 1243 * @param port_id 1244 * The index of the event port to setup. The value must be less than 1245 * @ref rte_event_dev_config.nb_event_ports previously supplied to 1246 * rte_event_dev_configure(). 1247 * @param port_conf 1248 * The pointer to the configuration data to be used for the port. 1249 * NULL value is allowed, in which case the default configuration is used. 1250 * 1251 * @see rte_event_port_default_conf_get() 1252 * 1253 * @return 1254 * - 0: Success, event port correctly set up. 1255 * - <0: Port configuration failed. 1256 * - -EINVAL - Invalid input parameter. 1257 * - -EBUSY - Port already started. 1258 * - -ENOTSUP - Function not supported on this device, or a NULL pointer passed 1259 * as the port_conf parameter, and no default configuration function available 1260 * for this device. 1261 * - -EDQUOT - Application tried to link a queue configured 1262 * with @ref RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port. 1263 */ 1264 int 1265 rte_event_port_setup(uint8_t dev_id, uint8_t port_id, 1266 const struct rte_event_port_conf *port_conf); 1267 1268 typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id, 1269 struct rte_event event, void *arg); 1270 /**< Callback function prototype that can be passed during 1271 * rte_event_port_release(), invoked once per a released event. 1272 */ 1273 1274 /** 1275 * Quiesce any core specific resources consumed by the event port. 
1276 * 1277 * Event ports are generally coupled with lcores, and a given Hardware 1278 * implementation might require the PMD to store port specific data in the 1279 * lcore. 1280 * When the application decides to migrate the event port to another lcore 1281 * or teardown the current lcore it may to call `rte_event_port_quiesce` 1282 * to make sure that all the data associated with the event port are released 1283 * from the lcore, this might also include any prefetched events. 1284 * While releasing the event port from the lcore, this function calls the 1285 * user-provided flush callback once per event. 1286 * 1287 * @note Invocation of this API does not affect the existing port configuration. 1288 * 1289 * @param dev_id 1290 * The identifier of the device. 1291 * @param port_id 1292 * The index of the event port to quiesce. The value must be less than 1293 * @ref rte_event_dev_config.nb_event_ports previously supplied to rte_event_dev_configure(). 1294 * @param release_cb 1295 * Callback function invoked once per flushed event. 1296 * @param args 1297 * Argument supplied to callback. 1298 */ 1299 void 1300 rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id, 1301 rte_eventdev_port_flush_t release_cb, void *args); 1302 1303 /** 1304 * Port attribute id for the maximum size of a burst enqueue operation supported on a port. 1305 */ 1306 #define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0 1307 /** 1308 * Port attribute id for the maximum size of a dequeue burst which can be returned from a port. 1309 */ 1310 #define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1 1311 /** 1312 * Port attribute id for the new event threshold of the port. 1313 * Once the number of events in the system exceeds this threshold, the enqueue of NEW-type 1314 * events will fail. 1315 */ 1316 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2 1317 /** 1318 * Port attribute id for the implicit release disable attribute of the port. 1319 */ 1320 #define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3 1321 1322 /** 1323 * Get an attribute from a port. 1324 * 1325 * @param dev_id 1326 * The identifier of the device. 1327 * @param port_id 1328 * The index of the event port to query. The value must be less than 1329 * @ref rte_event_dev_config.nb_event_ports previously supplied to rte_event_dev_configure(). 1330 * @param attr_id 1331 * The attribute ID to retrieve (RTE_EVENT_PORT_ATTR_*) 1332 * @param[out] attr_value 1333 * A pointer that will be filled in with the attribute value if successful 1334 * 1335 * @return 1336 * - 0: Successfully returned value. 1337 * - (-EINVAL) Invalid device, port or attr_id, or attr_value was NULL. 1338 */ 1339 int 1340 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id, 1341 uint32_t *attr_value); 1342 1343 /** 1344 * Start an event device. 1345 * 1346 * The device start step is the last one in device setup, and enables the event 1347 * ports and queues to start accepting events and scheduling them to event ports. 1348 * 1349 * On success, all basic functions exported by the API (event enqueue, 1350 * event dequeue and so on) can be invoked. 1351 * 1352 * @param dev_id 1353 * Event device identifier. 1354 * @return 1355 * - 0: Success, device started. 1356 * - -EINVAL: Invalid device id provided. 1357 * - -ENOTSUP: Device does not support this operation. 1358 * - -ESTALE : Not all ports of the device are configured. 1359 * - -ENOLINK: Not all queues are linked, which could lead to deadlock. 1360 */ 1361 int 1362 rte_event_dev_start(uint8_t dev_id); 1363 1364 /** 1365 * Stop an event device. 
1366 * 1367 * This function causes all queued events to be drained, including those 1368 * residing in event ports. While draining events out of the device, this 1369 * function calls the user-provided flush callback (if one was registered) once 1370 * per event. 1371 * 1372 * The device can be restarted with a call to rte_event_dev_start(). Threads 1373 * that continue to enqueue/dequeue while the device is stopped, or being 1374 * stopped, will result in undefined behavior. This includes event adapters, 1375 * which must be stopped prior to stopping the eventdev. 1376 * 1377 * @param dev_id 1378 * Event device identifier. 1379 * 1380 * @see rte_event_dev_stop_flush_callback_register() 1381 */ 1382 void 1383 rte_event_dev_stop(uint8_t dev_id); 1384 1385 typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id, 1386 struct rte_event event, void *arg); 1387 /**< Callback function called during rte_event_dev_stop(), invoked once per 1388 * flushed event. 1389 */ 1390 1391 /** 1392 * Registers a callback function to be invoked during rte_event_dev_stop() for 1393 * each flushed event. This function can be used to properly dispose of queued 1394 * events, for example events containing memory pointers. 1395 * 1396 * The callback function is only registered for the calling process. The 1397 * callback function must be registered in every process that can call 1398 * rte_event_dev_stop(). 1399 * 1400 * Only one callback function may be registered. Each new call replaces 1401 * the existing registered callback function with the new function passed in. 1402 * 1403 * To unregister a callback, call this function with a NULL callback pointer. 1404 * 1405 * @param dev_id 1406 * The identifier of the device. 1407 * @param callback 1408 * Callback function to be invoked once per flushed event. 1409 * Pass NULL to unset any previously-registered callback function. 1410 * @param userdata 1411 * Argument supplied to callback. 1412 * 1413 * @return 1414 * - 0 on success. 1415 * - -EINVAL if *dev_id* is invalid. 1416 * 1417 * @see rte_event_dev_stop() 1418 */ 1419 int rte_event_dev_stop_flush_callback_register(uint8_t dev_id, 1420 rte_eventdev_stop_flush_t callback, void *userdata); 1421 1422 /** 1423 * Close an event device. The device cannot be restarted! 1424 * 1425 * @param dev_id 1426 * Event device identifier. 1427 * 1428 * @return 1429 * - 0 on successfully closing device 1430 * - <0 on failure to close device. 1431 * - -EINVAL - invalid device id. 1432 * - -ENOTSUP - operation not supported for this device. 1433 * - -EAGAIN - device is busy. 1434 */ 1435 int 1436 rte_event_dev_close(uint8_t dev_id); 1437 1438 /** 1439 * Event vector structure. 1440 */ 1441 struct __rte_aligned(16) rte_event_vector { 1442 uint16_t nb_elem; 1443 /**< Number of elements valid in this event vector. */ 1444 uint16_t elem_offset : 12; 1445 /**< Offset into the vector array where valid elements start from. */ 1446 uint16_t rsvd : 3; 1447 /**< Reserved for future use */ 1448 uint16_t attr_valid : 1; 1449 /**< Indicates that the below union attributes have valid information. 1450 */ 1451 union { 1452 /* Used by Rx/Tx adapter. 1453 * Indicates that all the elements in this vector belong to the 1454 * same port and queue pair when originating from Rx adapter, 1455 * valid only when event type is ETHDEV_VECTOR or 1456 * ETH_RX_ADAPTER_VECTOR. 1457 * Can also be used to indicate the Tx adapter the destination 1458 * port and queue of the mbufs in the vector 1459 */ 1460 struct { 1461 uint16_t port; /**< Ethernet device port id. 
*/ 1462 uint16_t queue; /**< Ethernet device queue id. */ 1463 }; 1464 }; 1465 /**< Union to hold common attributes of the vector array. */ 1466 uint64_t impl_opaque; 1467 1468 /* empty structures do not have zero size in C++ leading to compilation errors 1469 * with clang about structure having different sizes in C and C++. 1470 * Since these are all zero-sized arrays, we can omit the "union" wrapper for 1471 * C++ builds, removing the warning. 1472 */ 1473 #ifndef __cplusplus 1474 /**< Implementation specific opaque value. 1475 * An implementation may use this field to hold implementation specific 1476 * value to share between dequeue and enqueue operation. 1477 * The application should not modify this field. 1478 */ 1479 union __rte_aligned(16) { 1480 #endif 1481 struct rte_mbuf *mbufs[0]; 1482 void *ptrs[0]; 1483 uint64_t u64s[0]; 1484 #ifndef __cplusplus 1485 }; 1486 #endif 1487 /**< Start of the vector array union. Depending upon the event type the 1488 * vector array can be an array of mbufs or pointers or opaque u64 1489 * values. 1490 */ 1491 }; 1492 1493 /* Scheduler type definitions */ 1494 #define RTE_SCHED_TYPE_ORDERED 0 1495 /**< Ordered scheduling 1496 * 1497 * Events from an ordered flow of an event queue can be scheduled to multiple 1498 * ports for concurrent processing while maintaining the original event order, 1499 * i.e. the order in which they were first enqueued to that queue. 1500 * This scheme allows events pertaining to the same, potentially large, flow to 1501 * be processed in parallel on multiple cores without incurring any 1502 * application-level order restoration logic overhead. 1503 * 1504 * After events are dequeued from a set of ports, as those events are re-enqueued 1505 * to another queue (with the op field set to @ref RTE_EVENT_OP_FORWARD), the event 1506 * device restores the original event order - including events returned from all 1507 * ports in the set - before the events are placed on the destination queue, 1508 * for subsequent scheduling to ports. 1509 * 1510 * Any events not forwarded i.e. dropped explicitly via RELEASE or implicitly 1511 * released by the next dequeue operation on a port, are skipped by the reordering 1512 * stage and do not affect the reordering of other returned events. 1513 * 1514 * Any NEW events sent on a port are not ordered with respect to FORWARD events sent 1515 * on the same port, since they have no original event order. They also are not 1516 * ordered with respect to NEW events enqueued on other ports. 1517 * However, NEW events to the same destination queue from the same port are guaranteed 1518 * to be enqueued in the order they were submitted via rte_event_enqueue_burst(). 1519 * 1520 * NOTE: 1521 * In restoring event order of forwarded events, the eventdev API guarantees that 1522 * all events from the same flow (i.e. same @ref rte_event.flow_id, 1523 * @ref rte_event.priority and @ref rte_event.queue_id) will be put in the original 1524 * order before being forwarded to the destination queue. 1525 * Some eventdevs may implement stricter ordering to achieve this aim, 1526 * for example, restoring the order across *all* flows dequeued from the same ORDERED 1527 * queue. 
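 *
 * As an illustrative sketch (not a complete application), a worker thread
 * forwarding ordered events to a next-stage queue might look as follows.
 * Here *dev_id*, *port_id*, *next_queue_id* and process() are assumed to be
 * defined by the application, and error handling is omitted:
 *
 *      struct rte_event ev[32];
 *      uint16_t i, n = rte_event_dequeue_burst(dev_id, port_id, ev, 32, 0);
 *      for (i = 0; i < n; i++) {
 *          process(ev[i].mbuf);                // application-defined work
 *          ev[i].op = RTE_EVENT_OP_FORWARD;    // keep the reordering context
 *          ev[i].queue_id = next_queue_id;     // next pipeline stage
 *      }
 *      rte_event_enqueue_burst(dev_id, port_id, ev, n);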
1528 *
1529 * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
1530 */
1531
1532 #define RTE_SCHED_TYPE_ATOMIC 1
1533 /**< Atomic scheduling
1534 *
1535 * Events from an atomic flow, identified by a combination of @ref rte_event.flow_id,
1536 * @ref rte_event.queue_id and @ref rte_event.priority, can be scheduled only to a
1537 * single port at a time. The port is guaranteed to have exclusive (atomic)
1538 * access to the associated flow context, which enables the user to avoid SW
1539 * synchronization. Atomic flows also maintain event ordering
1540 * since only one port at a time can process events from each flow of an
1541 * event queue, and events within a flow are not reordered within the scheduler.
1542 *
1543 * An atomic flow is locked to a port when events from that flow are first
1544 * scheduled to that port. That lock remains in place until the
1545 * application calls rte_event_dequeue_burst() from the same port,
1546 * which implicitly releases the lock (if the @ref RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL flag is not set).
1547 * The user may allow the scheduler to release the lock earlier than that by invoking
1548 * rte_event_enqueue_burst() with the RTE_EVENT_OP_RELEASE operation for each event from that flow.
1549 *
1550 * NOTE: Where multiple events from the same queue and atomic flow are scheduled to a port,
1551 * the lock for that flow is only released once the last event from the flow is released,
1552 * or forwarded to another queue. So long as there is at least one event from an atomic
1553 * flow scheduled to a port/core (including any events in the port's dequeue queue, not yet read
1554 * by the application), that port will hold the synchronization lock for that flow.
1555 *
1556 * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
1557 */
1558
1559 #define RTE_SCHED_TYPE_PARALLEL 2
1560 /**< Parallel scheduling
1561 *
1562 * The scheduler performs priority scheduling, load balancing, etc. functions
1563 * but does not provide additional event synchronization or ordering.
1564 * It is free to schedule events from a single parallel flow of an event queue
1565 * to multiple event ports for concurrent processing.
1566 * The application is responsible for flow context synchronization and
1567 * event ordering (SW synchronization).
1568 *
1569 * @see rte_event_queue_setup(), rte_event_dequeue_burst()
1570 */
1571
1572 /* Event types to classify the event source */
1573 #define RTE_EVENT_TYPE_ETHDEV 0x0
1574 /**< The event generated from ethdev subsystem */
1575 #define RTE_EVENT_TYPE_CRYPTODEV 0x1
1576 /**< The event generated from cryptodev subsystem */
1577 #define RTE_EVENT_TYPE_TIMER 0x2
1578 /**< The event generated from event timer adapter */
1579 #define RTE_EVENT_TYPE_CPU 0x3
1580 /**< The event generated from cpu for pipelining.
1581 * Application may use *sub_event_type* to further classify the event
1582 */
1583 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
1584 /**< The event generated from event eth Rx adapter */
1585 #define RTE_EVENT_TYPE_DMADEV 0x5
1586 /**< The event generated from dma subsystem */
1587 #define RTE_EVENT_TYPE_VECTOR 0x8
1588 /**< Indicates that event is a vector.
1589 * All vector event types should be a logical OR of RTE_EVENT_TYPE_VECTOR and the base event type.
1590 * This simplifies the pipeline design, as one can split the processing of
1591 * vector events and normal events across event types.
1592 * Example:
1593 * if (ev.event_type & RTE_EVENT_TYPE_VECTOR) {
1594 * // Classify and handle vector event.
1595 * } else {
1596 * // Classify and handle event.
1597 * }
1598 */
1599 #define RTE_EVENT_TYPE_ETHDEV_VECTOR \
1600 (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
1601 /**< The event vector generated from ethdev subsystem */
1602 #define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
1603 /**< The event vector generated from cpu for pipelining. */
1604 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR \
1605 (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
1606 /**< The event vector generated from eth Rx adapter. */
1607 #define RTE_EVENT_TYPE_CRYPTODEV_VECTOR \
1608 (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
1609 /**< The event vector generated from cryptodev adapter. */
1610
1611 #define RTE_EVENT_TYPE_MAX 0x10
1612 /**< Maximum number of event types */
1613
1614 /* Event enqueue operations */
1615 #define RTE_EVENT_OP_NEW 0
1616 /**< The @ref rte_event.op field must be set to this operation type to inject a new event,
1617 * i.e. one not previously dequeued, into the event device, to be scheduled
1618 * for processing.
1619 */
1620 #define RTE_EVENT_OP_FORWARD 1
1621 /**< The application must set the @ref rte_event.op field to this operation type to return a
1622 * previously dequeued event to the event device to be scheduled for further processing.
1623 *
1624 * This event *must* be enqueued to the same port that the
1625 * event to be forwarded was dequeued from.
1626 *
1627 * The event's fields, including (but not limited to) flow_id, scheduling type,
1628 * destination queue, and event payload e.g. mbuf pointer, may all be updated as
1629 * desired by the application, but the @ref rte_event.impl_opaque field must
1630 * be kept to the same value as was present when the event was dequeued.
1631 */
1632 #define RTE_EVENT_OP_RELEASE 2
1633 /**< Release the flow context associated with the schedule type.
1634 *
1635 * If the current flow's scheduling type is @ref RTE_SCHED_TYPE_ATOMIC
1636 * then this operation type hints the scheduler that the user has completed critical
1637 * section processing for this event in the current atomic context, and that the
1638 * scheduler may unlock any atomic locks held for this event.
1639 * If this is the last event from an atomic flow, i.e. all flow locks are released
1640 * (see @ref RTE_SCHED_TYPE_ATOMIC for details), the scheduler is now allowed to
1641 * schedule events from that flow to another port.
1642 * However, the atomic locks may still be held until the next rte_event_dequeue_burst()
1643 * call; enqueuing an event with op type @ref RTE_EVENT_OP_RELEASE is a hint only,
1644 * allowing the scheduler to release the atomic locks early, but not requiring it to do so.
1645 *
1646 * Early atomic lock release may increase parallelism and thus system
1647 * performance, but the user needs to carefully design the split into critical
1648 * vs non-critical sections.
1649 *
1650 * If the current flow's scheduling type is @ref RTE_SCHED_TYPE_ORDERED
1651 * then this operation type informs the scheduler that the current event has
1652 * completed processing and will not be returned to the scheduler, i.e.
1653 * it has been dropped, and so the reordering context for that event
1654 * should be considered filled.
1655 *
1656 * Events with this operation type must only be enqueued to the same port that the
1657 * event to be released was dequeued from. The @ref rte_event.impl_opaque
1658 * field in the release event must have the same value as that in the original dequeued event.
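 *
 * A hedged sketch of releasing the atomic context of a previously dequeued
 * event early (*dev_id*, *port_id* and *ev* are assumed to come from the
 * surrounding application; error handling is omitted):
 *
 *      ev.op = RTE_EVENT_OP_RELEASE;   // ev was dequeued from port_id earlier
 *      // ev.impl_opaque is left unmodified, as required
 *      rte_event_enqueue_burst(dev_id, port_id, &ev, 1);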
1659 * 1660 * If a dequeued event is re-enqueued with operation type of @ref RTE_EVENT_OP_RELEASE, 1661 * then any subsequent enqueue of that event - or a copy of it - must be done as event of type 1662 * @ref RTE_EVENT_OP_NEW, not @ref RTE_EVENT_OP_FORWARD. This is because any context for 1663 * the originally dequeued event, i.e. atomic locks, or reorder buffer entries, will have 1664 * been removed or invalidated by the release operation. 1665 */ 1666 1667 /** 1668 * The generic *rte_event* structure to hold the event attributes 1669 * for dequeue and enqueue operation 1670 */ 1671 struct rte_event { 1672 /* WORD0 */ 1673 union { 1674 uint64_t event; 1675 /** Event attributes for dequeue or enqueue operation */ 1676 struct { 1677 uint32_t flow_id:20; 1678 /**< Target flow identifier for the enqueue and dequeue operation. 1679 * 1680 * For @ref RTE_SCHED_TYPE_ATOMIC, this field is used to identify a 1681 * flow for atomicity within a queue & priority level, such that events 1682 * from each individual flow will only be scheduled to one port at a time. 1683 * 1684 * This field is preserved between enqueue and dequeue when 1685 * a device reports the @ref RTE_EVENT_DEV_CAP_CARRY_FLOW_ID 1686 * capability. Otherwise the value is implementation dependent 1687 * on dequeue. 1688 */ 1689 uint32_t sub_event_type:8; 1690 /**< Sub-event types based on the event source. 1691 * 1692 * This field is preserved between enqueue and dequeue. 1693 * 1694 * @see RTE_EVENT_TYPE_CPU 1695 */ 1696 uint32_t event_type:4; 1697 /**< Event type to classify the event source. (RTE_EVENT_TYPE_*) 1698 * 1699 * This field is preserved between enqueue and dequeue 1700 */ 1701 uint8_t op:2; 1702 /**< The type of event enqueue operation - new/forward/ etc. 1703 * 1704 * This field is *not* preserved across an instance 1705 * and is implementation dependent on dequeue. 1706 * 1707 * @see RTE_EVENT_OP_NEW 1708 * @see RTE_EVENT_OP_FORWARD 1709 * @see RTE_EVENT_OP_RELEASE 1710 */ 1711 uint8_t rsvd:4; 1712 /**< Reserved for future use. 1713 * 1714 * Should be set to zero when initializing event structures. 1715 * 1716 * When forwarding or releasing existing events dequeued from the scheduler, 1717 * this field can be ignored. 1718 */ 1719 uint8_t sched_type:2; 1720 /**< Scheduler synchronization type (RTE_SCHED_TYPE_*) 1721 * associated with flow id on a given event queue 1722 * for the enqueue and dequeue operation. 1723 * 1724 * This field is used to determine the scheduling type 1725 * for events sent to queues where @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES 1726 * is configured. 1727 * For queues where only a single scheduling type is available, 1728 * this field must be set to match the configured scheduling type. 1729 * 1730 * This field is preserved between enqueue and dequeue. 1731 * 1732 * @see RTE_SCHED_TYPE_ORDERED 1733 * @see RTE_SCHED_TYPE_ATOMIC 1734 * @see RTE_SCHED_TYPE_PARALLEL 1735 */ 1736 uint8_t queue_id; 1737 /**< Targeted event queue identifier for the enqueue or 1738 * dequeue operation. 1739 * The value must be less than @ref rte_event_dev_config.nb_event_queues 1740 * which was previously supplied to rte_event_dev_configure(). 1741 * 1742 * This field is preserved between enqueue on dequeue. 1743 */ 1744 uint8_t priority; 1745 /**< Event priority relative to other events in the 1746 * event queue. The requested priority should in the 1747 * range of [@ref RTE_EVENT_DEV_PRIORITY_HIGHEST, 1748 * @ref RTE_EVENT_DEV_PRIORITY_LOWEST]. 
1749 * 1750 * The implementation shall normalize the requested 1751 * priority to supported priority value. 1752 * [For devices with where the supported priority range is a power-of-2, the 1753 * normalization will be done via bit-shifting, so only the highest 1754 * log2(num_priorities) bits will be used by the event device] 1755 * 1756 * Valid when the device has @ref RTE_EVENT_DEV_CAP_EVENT_QOS capability 1757 * and this field is preserved between enqueue and dequeue, 1758 * though with possible loss of precision due to normalization and 1759 * subsequent de-normalization. (For example, if a device only supports 8 1760 * priority levels, only the high 3 bits of this field will be 1761 * used by that device, and hence only the value of those 3 bits are 1762 * guaranteed to be preserved between enqueue and dequeue.) 1763 * 1764 * Ignored when device does not support @ref RTE_EVENT_DEV_CAP_EVENT_QOS 1765 * capability, and it is implementation dependent if this field is preserved 1766 * between enqueue and dequeue. 1767 */ 1768 uint8_t impl_opaque; 1769 /**< Opaque field for event device use. 1770 * 1771 * An event driver implementation may use this field to hold an 1772 * implementation specific value to share between 1773 * dequeue and enqueue operation. 1774 * 1775 * The application must not modify this field. 1776 * Its value is implementation dependent on dequeue, 1777 * and must be returned unmodified on enqueue when 1778 * op type is @ref RTE_EVENT_OP_FORWARD or @ref RTE_EVENT_OP_RELEASE. 1779 * This field is ignored on events with op type 1780 * @ref RTE_EVENT_OP_NEW. 1781 */ 1782 }; 1783 }; 1784 /* WORD1 */ 1785 union { 1786 uint64_t u64; 1787 /**< Opaque 64-bit value */ 1788 void *event_ptr; 1789 /**< Opaque event pointer */ 1790 struct rte_mbuf *mbuf; 1791 /**< mbuf pointer if dequeued event is associated with mbuf */ 1792 struct rte_event_vector *vec; 1793 /**< Event vector pointer. */ 1794 }; 1795 }; 1796 1797 /* Ethdev Rx adapter capability bitmap flags */ 1798 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1 1799 /**< This flag is sent when the packet transfer mechanism is in HW. 1800 * Ethdev can send packets to the event device using internal event port. 1801 */ 1802 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2 1803 /**< Adapter supports multiple event queues per ethdev. Every ethdev 1804 * Rx queue can be connected to a unique event queue. 1805 */ 1806 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4 1807 /**< The application can override the adapter generated flow ID in the 1808 * event. This flow ID can be specified when adding an ethdev Rx queue 1809 * to the adapter using the ev.flow_id member. 1810 * @see struct rte_event_eth_rx_adapter_queue_conf::ev 1811 * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags 1812 */ 1813 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR 0x8 1814 /**< Adapter supports event vectorization per ethdev. */ 1815 1816 /** 1817 * Retrieve the event device's ethdev Rx adapter capabilities for the 1818 * specified ethernet port 1819 * 1820 * @param dev_id 1821 * The identifier of the device. 1822 * 1823 * @param eth_port_id 1824 * The identifier of the ethernet device. 1825 * 1826 * @param[out] caps 1827 * A pointer to memory filled with Rx event adapter capabilities. 1828 * 1829 * @return 1830 * - 0: Success, driver provides Rx event adapter capabilities for the 1831 * ethernet device. 1832 * - <0: Error code returned by the driver function. 
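 *
 * As an illustrative sketch, an application might use the returned capability
 * flags to decide whether an Rx service core is needed (*eth_port* is an
 * assumed, already-configured ethdev port id; error handling is minimal):
 *
 *      uint32_t caps = 0;
 *      if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port, &caps) == 0 &&
 *          (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
 *          ; // packets reach the event device via an internal port, no Rx service core needed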
1833 */ 1834 int 1835 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id, 1836 uint32_t *caps); 1837 1838 #define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0) 1839 /**< This flag is set when the timer mechanism is in HW. */ 1840 1841 #define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC (1ULL << 1) 1842 /**< This flag is set if periodic mode is supported. */ 1843 1844 /** 1845 * Retrieve the event device's timer adapter capabilities. 1846 * 1847 * @param dev_id 1848 * The identifier of the device. 1849 * 1850 * @param[out] caps 1851 * A pointer to memory to be filled with event timer adapter capabilities. 1852 * 1853 * @return 1854 * - 0: Success, driver provided event timer adapter capabilities. 1855 * - <0: Error code returned by the driver function. 1856 */ 1857 int 1858 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps); 1859 1860 /* Crypto adapter capability bitmap flag */ 1861 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1 1862 /**< Flag indicates HW is capable of generating events in 1863 * RTE_EVENT_OP_NEW enqueue operation. Cryptodev will send 1864 * packets to the event device as new events using an internal 1865 * event port. 1866 */ 1867 1868 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2 1869 /**< Flag indicates HW is capable of generating events in 1870 * RTE_EVENT_OP_FORWARD enqueue operation. Cryptodev will send 1871 * packets to the event device as forwarded event using an 1872 * internal event port. 1873 */ 1874 1875 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4 1876 /**< Flag indicates HW is capable of mapping crypto queue pair to 1877 * event queue. 1878 */ 1879 1880 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8 1881 /**< Flag indicates HW/SW supports a mechanism to store and retrieve 1882 * the private data information along with the crypto session. 1883 */ 1884 1885 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR 0x10 1886 /**< Flag indicates HW is capable of aggregating processed 1887 * crypto operations into rte_event_vector. 1888 */ 1889 1890 /** 1891 * Retrieve the event device's crypto adapter capabilities for the 1892 * specified cryptodev device 1893 * 1894 * @param dev_id 1895 * The identifier of the device. 1896 * 1897 * @param cdev_id 1898 * The identifier of the cryptodev device. 1899 * 1900 * @param[out] caps 1901 * A pointer to memory filled with event adapter capabilities. 1902 * It is expected to be pre-allocated & initialized by caller. 1903 * 1904 * @return 1905 * - 0: Success, driver provides event adapter capabilities for the 1906 * cryptodev device. 1907 * - <0: Error code returned by the driver function. 1908 */ 1909 int 1910 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id, 1911 uint32_t *caps); 1912 1913 /* DMA adapter capability bitmap flag */ 1914 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1 1915 /**< Flag indicates HW is capable of generating events in 1916 * RTE_EVENT_OP_NEW enqueue operation. DMADEV will send 1917 * packets to the event device as new events using an 1918 * internal event port. 1919 */ 1920 1921 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2 1922 /**< Flag indicates HW is capable of generating events in 1923 * RTE_EVENT_OP_FORWARD enqueue operation. DMADEV will send 1924 * packets to the event device as forwarded event using an 1925 * internal event port. 
1926 */
1927
1928 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4
1929 /**< Flag indicates HW is capable of mapping DMA vchan to event queue. */
1930
1931 /**
1932 * Retrieve the event device's DMA adapter capabilities for the
1933 * specified dmadev device
1934 *
1935 * @param dev_id
1936 * The identifier of the device.
1937 *
1938 * @param dmadev_id
1939 * The identifier of the dmadev device.
1940 *
1941 * @param[out] caps
1942 * A pointer to memory filled with event adapter capabilities.
1943 * It is expected to be pre-allocated & initialized by caller.
1944 *
1945 * @return
1946 * - 0: Success, driver provides event adapter capabilities for the
1947 * dmadev device.
1948 * - <0: Error code returned by the driver function.
1949 *
1950 */
1951 __rte_experimental
1952 int
1953 rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);
1954
1955 /* Ethdev Tx adapter capability bitmap flags */
1956 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1
1957 /**< This flag is set when the PMD supports a packet transmit callback
1958 */
1959 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR 0x2
1960 /**< Indicates that the Tx adapter is capable of handling event vector of
1961 * mbufs.
1962 */
1963
1964 /**
1965 * Retrieve the event device's eth Tx adapter capabilities
1966 *
1967 * @param dev_id
1968 * The identifier of the device.
1969 *
1970 * @param eth_port_id
1971 * The identifier of the ethernet device.
1972 *
1973 * @param[out] caps
1974 * A pointer to memory filled with eth Tx adapter capabilities.
1975 *
1976 * @return
1977 * - 0: Success, driver provides eth Tx adapter capabilities.
1978 * - <0: Error code returned by the driver function.
1979 */
1980 int
1981 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1982 uint32_t *caps);
1983
1984 /**
1985 * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
1986 *
1987 * If the device is configured with the RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT flag
1988 * then the application can use this function to convert a timeout value in
1989 * nanoseconds to the implementation-specific timeout value supplied in
1990 * rte_event_dequeue_burst()
1991 *
1992 * @param dev_id
1993 * The identifier of the device.
1994 * @param ns
1995 * Wait time in nanoseconds
1996 * @param[out] timeout_ticks
1997 * Value for the *timeout_ticks* parameter in rte_event_dequeue_burst()
1998 *
1999 * @return
2000 * - 0 on success.
2001 * - -ENOTSUP if the device doesn't support timeouts
2002 * - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
2003 * - other values < 0 on failure.
2004 *
2005 * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
2006 * @see rte_event_dev_configure()
2007 */
2008 int
2009 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
2010 uint64_t *timeout_ticks);
2011
2012 /**
2013 * Link multiple source event queues supplied in *queues* to the destination
2014 * event port designated by its *port_id* with associated service priority
2015 * supplied in *priorities* on the event device designated by its *dev_id*.
2016 *
2017 * The link establishment shall enable the event port *port_id* to receive
2018 * events from the specified event queue(s) supplied in *queues*.
2019 *
2020 * An event queue may link to one or more event ports.
2021 * The number of links that can be established from an event queue to an event port is
2022 * implementation defined.
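 *
 * For example, a minimal sketch linking two already-configured queues to a
 * port with normal servicing priority (*dev_id* and *port_id* are assumed to
 * be valid, and queue ids 0 and 1 are assumed to have been set up):
 *
 *      uint8_t queues[] = {0, 1};
 *      uint8_t prios[] = {RTE_EVENT_DEV_PRIORITY_NORMAL, RTE_EVENT_DEV_PRIORITY_NORMAL};
 *      if (rte_event_port_link(dev_id, port_id, queues, prios, 2) != 2)
 *          ; // not all links were established; inspect rte_errno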
2023 *
2024 * Event queue to event port links can be changed at runtime
2025 * without re-configuring the device, to support scaling and to reduce the
2026 * latency of critical work by establishing links with more event ports
2027 * at runtime.
2028 *
2029 * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
2030 * than or equal to one, this function links the event queues to the default
2031 * profile_id i.e. profile_id 0 of the event port.
2032 *
2033 * @param dev_id
2034 * The identifier of the device.
2035 *
2036 * @param port_id
2037 * Event port identifier to select the destination port to link.
2038 *
2039 * @param queues
2040 * Points to an array of *nb_links* event queues to be linked
2041 * to the event port.
2042 * NULL value is allowed, in which case this function links all the configured
2043 * event queues *nb_event_queues*, which were previously supplied to
2044 * rte_event_dev_configure(), to the event port *port_id*.
2045 *
2046 * @param priorities
2047 * Points to an array of *nb_links* service priorities associated with each
2048 * event queue link to event port.
2049 * The priority defines the event port's servicing priority for the
2050 * event queue, which may be ignored by an implementation.
2051 * The requested priority should be in the range of
2052 * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
2053 * The implementation shall normalize the requested priority to an
2054 * implementation supported priority value.
2055 * NULL value is allowed, in which case this function links the event queues
2056 * with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority.
2057 *
2058 * @param nb_links
2059 * The number of links to establish. This parameter is ignored if queues is
2060 * NULL.
2061 *
2062 * @return
2063 * The number of links actually established. The return value can be less than
2064 * the value of the *nb_links* parameter when the implementation has a
2065 * limitation on specific queue to port link establishment or if invalid
2066 * parameters are specified in *queues*.
2067 * If the return value is less than *nb_links*, the remaining links at the end
2068 * of link[] are not established, and the caller has to take care of them.
2069 * If the return value is less than *nb_links* then the implementation shall update
2070 * rte_errno accordingly. Possible rte_errno values are:
2071 * (EDQUOT) Quota exceeded (the application tried to link a queue configured with
2072 * RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
2073 * (EINVAL) Invalid parameter
2074 */
2075 int
2076 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
2077 const uint8_t queues[], const uint8_t priorities[],
2078 uint16_t nb_links);
2079
2080 /**
2081 * Unlink multiple source event queues supplied in *queues* from the destination
2082 * event port designated by its *port_id* on the event device designated
2083 * by its *dev_id*.
2084 *
2085 * The unlink call issues an async request to disable the event port *port_id*
2086 * from receiving events from the specified event queue(s).
2087 * Event queue to event port links can be removed at runtime
2088 * without re-configuring the device.
2089 *
2090 * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
2091 * than or equal to one, this function unlinks the event queues from the default
2092 * profile identifier i.e. profile 0 of the event port.
2093 *
2094 * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
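 *
 * For instance, a hedged sketch that unlinks one queue and busy-waits for the
 * unlink to complete (*dev_id* and *port_id* are assumed valid; rte_pause()
 * comes from rte_pause.h):
 *
 *      uint8_t q = 1;
 *      if (rte_event_port_unlink(dev_id, port_id, &q, 1) == 1)
 *          while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *              rte_pause();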
2095 * 2096 * @param dev_id 2097 * The identifier of the device. 2098 * 2099 * @param port_id 2100 * Event port identifier to select the destination port to unlink. 2101 * 2102 * @param queues 2103 * Points to an array of *nb_unlinks* event queues to be unlinked 2104 * from the event port. 2105 * NULL value is allowed, in which case this function unlinks all the 2106 * event queue(s) from the event port *port_id*. 2107 * 2108 * @param nb_unlinks 2109 * The number of unlinks to establish. This parameter is ignored if queues is 2110 * NULL. 2111 * 2112 * @return 2113 * The number of unlinks successfully requested. The return value can be less 2114 * than the value of the *nb_unlinks* parameter when the implementation has the 2115 * limitation on specific queue to port unlink establishment or 2116 * if invalid parameters are specified. 2117 * If the return value is less than *nb_unlinks*, the remaining queues at the 2118 * end of queues[] are not unlinked, and the caller has to take care of them. 2119 * If return value is less than *nb_unlinks* then implementation shall update 2120 * the rte_errno accordingly, Possible rte_errno values are 2121 * (EINVAL) Invalid parameter 2122 */ 2123 int 2124 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, 2125 uint8_t queues[], uint16_t nb_unlinks); 2126 2127 /** 2128 * Link multiple source event queues supplied in *queues* to the destination 2129 * event port designated by its *port_id* with associated profile identifier 2130 * supplied in *profile_id* with service priorities supplied in *priorities* 2131 * on the event device designated by its *dev_id*. 2132 * 2133 * If *profile_id* is set to 0 then, the links created by the call `rte_event_port_link` 2134 * will be overwritten. 2135 * 2136 * Event ports by default use profile_id 0 unless it is changed using the 2137 * call ``rte_event_port_profile_switch()``. 2138 * 2139 * The link establishment shall enable the event port *port_id* from 2140 * receiving events from the specified event queue(s) supplied in *queues* 2141 * 2142 * An event queue may link to one or more event ports. 2143 * The number of links can be established from an event queue to event port is 2144 * implementation defined. 2145 * 2146 * Event queue(s) to event port link establishment can be changed at runtime 2147 * without re-configuring the device to support scaling and to reduce the 2148 * latency of critical work by establishing the link with more event ports 2149 * at runtime. 2150 * 2151 * @param dev_id 2152 * The identifier of the device. 2153 * 2154 * @param port_id 2155 * Event port identifier to select the destination port to link. 2156 * 2157 * @param queues 2158 * Points to an array of *nb_links* event queues to be linked 2159 * to the event port. 2160 * NULL value is allowed, in which case this function links all the configured 2161 * event queues *nb_event_queues* which previously supplied to 2162 * rte_event_dev_configure() to the event port *port_id* 2163 * 2164 * @param priorities 2165 * Points to an array of *nb_links* service priorities associated with each 2166 * event queue link to event port. 2167 * The priority defines the event port's servicing priority for 2168 * event queue, which may be ignored by an implementation. 2169 * The requested priority should in the range of 2170 * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST]. 2171 * The implementation shall normalize the requested priority to 2172 * implementation supported priority value. 
2173 * NULL value is allowed, in which case this function links the event queues
2174 * with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority.
2175 *
2176 * @param nb_links
2177 * The number of links to establish. This parameter is ignored if queues is
2178 * NULL.
2179 *
2180 * @param profile_id
2181 * The profile identifier associated with the links between event queues and
2182 * event port. Should be less than the max capability reported by
2183 * ``rte_event_dev_info::max_profiles_per_port``
2184 *
2185 * @return
2186 * The number of links actually established. The return value can be less than
2187 * the value of the *nb_links* parameter when the implementation has a
2188 * limitation on specific queue to port link establishment or if invalid
2189 * parameters are specified in *queues*.
2190 * If the return value is less than *nb_links*, the remaining links at the end
2191 * of link[] are not established, and the caller has to take care of them.
2192 * If the return value is less than *nb_links* then the implementation shall update
2193 * rte_errno accordingly. Possible rte_errno values are:
2194 * (EDQUOT) Quota exceeded (the application tried to link a queue configured with
2195 * RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
2196 * (EINVAL) Invalid parameter
2197 *
2198 */
2199 __rte_experimental
2200 int
2201 rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
2202 const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);
2203
2204 /**
2205 * Unlink multiple source event queues supplied in *queues* that belong to profile
2206 * designated by *profile_id* from the destination event port designated by its
2207 * *port_id* on the event device designated by its *dev_id*.
2208 *
2209 * If *profile_id* is set to 0, i.e. the default profile, then this function
2210 * behaves as ``rte_event_port_unlink()``.
2211 *
2212 * The unlink call issues an async request to disable the event port *port_id*
2213 * from receiving events from the specified event queue(s).
2214 * Event queue to event port links can be removed at runtime
2215 * without re-configuring the device.
2216 *
2217 * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
2218 *
2219 * @param dev_id
2220 * The identifier of the device.
2221 *
2222 * @param port_id
2223 * Event port identifier to select the destination port to unlink.
2224 *
2225 * @param queues
2226 * Points to an array of *nb_unlinks* event queues to be unlinked
2227 * from the event port.
2228 * NULL value is allowed, in which case this function unlinks all the
2229 * event queue(s) from the event port *port_id*.
2230 *
2231 * @param nb_unlinks
2232 * The number of unlinks to perform. This parameter is ignored if queues is
2233 * NULL.
2234 *
2235 * @param profile_id
2236 * The profile identifier associated with the links between event queues and
2237 * event port. Should be less than the max capability reported by
2238 * ``rte_event_dev_info::max_profiles_per_port``
2239 *
2240 * @return
2241 * The number of unlinks successfully requested. The return value can be less
2242 * than the value of the *nb_unlinks* parameter when the implementation has a
2243 * limitation on specific queue to port unlinks or
2244 * if invalid parameters are specified.
2245 * If the return value is less than *nb_unlinks*, the remaining queues at the
2246 * end of queues[] are not unlinked, and the caller has to take care of them.
2247 * If return value is less than *nb_unlinks* then implementation shall update 2248 * the rte_errno accordingly, Possible rte_errno values are 2249 * (EINVAL) Invalid parameter 2250 * 2251 */ 2252 __rte_experimental 2253 int 2254 rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[], 2255 uint16_t nb_unlinks, uint8_t profile_id); 2256 2257 /** 2258 * Returns the number of unlinks in progress. 2259 * 2260 * This function provides the application with a method to detect when an 2261 * unlink has been completed by the implementation. 2262 * 2263 * @see rte_event_port_unlink() to issue unlink requests. 2264 * 2265 * @param dev_id 2266 * The identifier of the device. 2267 * 2268 * @param port_id 2269 * Event port identifier to select port to check for unlinks in progress. 2270 * 2271 * @return 2272 * The number of unlinks that are in progress. A return of zero indicates that 2273 * there are no outstanding unlink requests. A positive return value indicates 2274 * the number of unlinks that are in progress, but are not yet complete. 2275 * A negative return value indicates an error, -EINVAL indicates an invalid 2276 * parameter passed for *dev_id* or *port_id*. 2277 */ 2278 int 2279 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id); 2280 2281 /** 2282 * Retrieve the list of source event queues and its associated service priority 2283 * linked to the destination event port designated by its *port_id* 2284 * on the event device designated by its *dev_id*. 2285 * 2286 * @param dev_id 2287 * The identifier of the device. 2288 * 2289 * @param port_id 2290 * Event port identifier. 2291 * 2292 * @param[out] queues 2293 * Points to an array of *queues* for output. 2294 * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to 2295 * store the event queue(s) linked with event port *port_id* 2296 * 2297 * @param[out] priorities 2298 * Points to an array of *priorities* for output. 2299 * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to 2300 * store the service priority associated with each event queue linked 2301 * 2302 * @return 2303 * The number of links established on the event port designated by its 2304 * *port_id*. 2305 * - <0 on failure. 2306 */ 2307 int 2308 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id, 2309 uint8_t queues[], uint8_t priorities[]); 2310 2311 /** 2312 * Retrieve the list of source event queues and its service priority 2313 * associated to a *profile_id* and linked to the destination event port 2314 * designated by its *port_id* on the event device designated by its *dev_id*. 2315 * 2316 * @param dev_id 2317 * The identifier of the device. 2318 * 2319 * @param port_id 2320 * Event port identifier. 2321 * 2322 * @param[out] queues 2323 * Points to an array of *queues* for output. 2324 * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to 2325 * store the event queue(s) linked with event port *port_id* 2326 * 2327 * @param[out] priorities 2328 * Points to an array of *priorities* for output. 2329 * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to 2330 * store the service priority associated with each event queue linked 2331 * 2332 * @param profile_id 2333 * The profile identifier associated with the links between event queues and 2334 * event port. Should be less than the max capability reported by 2335 * ``rte_event_dev_info::max_profiles_per_port`` 2336 * 2337 * @return 2338 * The number of links established on the event port designated by its 2339 * *port_id*. 
2340 * - <0 on failure. 2341 */ 2342 __rte_experimental 2343 int 2344 rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[], 2345 uint8_t priorities[], uint8_t profile_id); 2346 2347 /** 2348 * Retrieve the service ID of the event dev. If the adapter doesn't use 2349 * a rte_service function, this function returns -ESRCH. 2350 * 2351 * @param dev_id 2352 * The identifier of the device. 2353 * 2354 * @param [out] service_id 2355 * A pointer to a uint32_t, to be filled in with the service id. 2356 * 2357 * @return 2358 * - 0: Success 2359 * - <0: Error code on failure, if the event dev doesn't use a rte_service 2360 * function, this function returns -ESRCH. 2361 */ 2362 int 2363 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id); 2364 2365 /** 2366 * Dump internal information about *dev_id* to the FILE* provided in *f*. 2367 * 2368 * @param dev_id 2369 * The identifier of the device. 2370 * 2371 * @param f 2372 * A pointer to a file for output 2373 * 2374 * @return 2375 * - 0: on success 2376 * - <0: on failure. 2377 */ 2378 int 2379 rte_event_dev_dump(uint8_t dev_id, FILE *f); 2380 2381 /** Maximum name length for extended statistics counters */ 2382 #define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64 2383 2384 /** 2385 * Selects the component of the eventdev to retrieve statistics from. 2386 */ 2387 enum rte_event_dev_xstats_mode { 2388 RTE_EVENT_DEV_XSTATS_DEVICE, 2389 RTE_EVENT_DEV_XSTATS_PORT, 2390 RTE_EVENT_DEV_XSTATS_QUEUE, 2391 }; 2392 2393 /** 2394 * A name-key lookup element for extended statistics. 2395 * 2396 * This structure is used to map between names and ID numbers 2397 * for extended ethdev statistics. 2398 */ 2399 struct rte_event_dev_xstats_name { 2400 char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE]; 2401 }; 2402 2403 /** 2404 * Retrieve names of extended statistics of an event device. 2405 * 2406 * @param dev_id 2407 * The identifier of the event device. 2408 * @param mode 2409 * The mode of statistics to retrieve. Choices include the device statistics, 2410 * port statistics or queue statistics. 2411 * @param queue_port_id 2412 * Used to specify the port or queue number in queue or port mode, and is 2413 * ignored in device mode. 2414 * @param[out] xstats_names 2415 * Block of memory to insert names into. Must be at least size in capacity. 2416 * If set to NULL, function returns required capacity. 2417 * @param[out] ids 2418 * Block of memory to insert ids into. Must be at least size in capacity. 2419 * If set to NULL, function returns required capacity. The id values returned 2420 * can be passed to *rte_event_dev_xstats_get* to select statistics. 2421 * @param size 2422 * Capacity of xstats_names (number of names). 2423 * @return 2424 * - positive value lower or equal to size: success. The return value 2425 * is the number of entries filled in the stats table. 2426 * - positive value higher than size: error, the given statistics table 2427 * is too small. The return value corresponds to the size that should 2428 * be given to succeed. The entries in the table are not valid and 2429 * shall not be used by the caller. 2430 * - negative value on error: 2431 * -ENODEV for invalid *dev_id* 2432 * -EINVAL for invalid mode, queue port or id parameters 2433 * -ENOTSUP if the device doesn't support this function. 
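 *
 * A hedged sketch of the usual two-call pattern: query the required capacity
 * first, then retrieve the names and ids (device-level statistics are assumed
 * here; calloc() comes from <stdlib.h>, and error checks are minimal):
 *
 *      int num = rte_event_dev_xstats_names_get(dev_id,
 *                      RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *      if (num > 0) {
 *          struct rte_event_dev_xstats_name *names = calloc(num, sizeof(*names));
 *          uint64_t *ids = calloc(num, sizeof(*ids));
 *          num = rte_event_dev_xstats_names_get(dev_id,
 *                      RTE_EVENT_DEV_XSTATS_DEVICE, 0, names, ids, num);
 *          // ... use names[]/ids[], e.g. with rte_event_dev_xstats_get(), then free them ...
 *      }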
2434 */ 2435 int 2436 rte_event_dev_xstats_names_get(uint8_t dev_id, 2437 enum rte_event_dev_xstats_mode mode, 2438 uint8_t queue_port_id, 2439 struct rte_event_dev_xstats_name *xstats_names, 2440 uint64_t *ids, 2441 unsigned int size); 2442 2443 /** 2444 * Retrieve extended statistics of an event device. 2445 * 2446 * @param dev_id 2447 * The identifier of the device. 2448 * @param mode 2449 * The mode of statistics to retrieve. Choices include the device statistics, 2450 * port statistics or queue statistics. 2451 * @param queue_port_id 2452 * Used to specify the port or queue number in queue or port mode, and is 2453 * ignored in device mode. 2454 * @param ids 2455 * The id numbers of the stats to get. The ids can be got from the stat 2456 * position in the stat list from rte_event_dev_get_xstats_names(), or 2457 * by using rte_event_dev_xstats_by_name_get(). 2458 * @param[out] values 2459 * The values for each stats request by ID. 2460 * @param n 2461 * The number of stats requested 2462 * @return 2463 * - positive value: number of stat entries filled into the values array 2464 * - negative value on error: 2465 * -ENODEV for invalid *dev_id* 2466 * -EINVAL for invalid mode, queue port or id parameters 2467 * -ENOTSUP if the device doesn't support this function. 2468 */ 2469 int 2470 rte_event_dev_xstats_get(uint8_t dev_id, 2471 enum rte_event_dev_xstats_mode mode, 2472 uint8_t queue_port_id, 2473 const uint64_t ids[], 2474 uint64_t values[], unsigned int n); 2475 2476 /** 2477 * Retrieve the value of a single stat by requesting it by name. 2478 * 2479 * @param dev_id 2480 * The identifier of the device 2481 * @param name 2482 * The stat name to retrieve 2483 * @param[out] id 2484 * If non-NULL, the numerical id of the stat will be returned, so that further 2485 * requests for the stat can be got using rte_event_dev_xstats_get, which will 2486 * be faster as it doesn't need to scan a list of names for the stat. 2487 * If the stat cannot be found, the id returned will be (unsigned)-1. 2488 * @return 2489 * - positive value or zero: the stat value 2490 * - negative value: -EINVAL if stat not found, -ENOTSUP if not supported. 2491 */ 2492 uint64_t 2493 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name, 2494 uint64_t *id); 2495 2496 /** 2497 * Reset the values of the xstats of the selected component in the device. 2498 * 2499 * @param dev_id 2500 * The identifier of the device 2501 * @param mode 2502 * The mode of the statistics to reset. Choose from device, queue or port. 2503 * @param queue_port_id 2504 * The queue or port to reset. 0 and positive values select ports and queues, 2505 * while -1 indicates all ports or queues. 2506 * @param ids 2507 * Selects specific statistics to be reset. When NULL, all statistics selected 2508 * by *mode* will be reset. If non-NULL, must point to array of at least 2509 * *nb_ids* size. 2510 * @param nb_ids 2511 * The number of ids available from the *ids* array. Ignored when ids is NULL. 2512 * @return 2513 * - zero: successfully reset the statistics to zero 2514 * - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported. 2515 */ 2516 int 2517 rte_event_dev_xstats_reset(uint8_t dev_id, 2518 enum rte_event_dev_xstats_mode mode, 2519 int16_t queue_port_id, 2520 const uint64_t ids[], 2521 uint32_t nb_ids); 2522 2523 /** 2524 * Trigger the eventdev self test. 
2525 * 2526 * @param dev_id 2527 * The identifier of the device 2528 * @return 2529 * - 0: Selftest successful 2530 * - -ENOTSUP if the device doesn't support selftest 2531 * - other values < 0 on failure. 2532 */ 2533 int rte_event_dev_selftest(uint8_t dev_id); 2534 2535 /** 2536 * Get the memory required per event vector based on the number of elements per 2537 * vector. 2538 * This should be used to create the mempool that holds the event vectors. 2539 * 2540 * @param name 2541 * The name of the vector pool. 2542 * @param n 2543 * The number of elements in the mbuf pool. 2544 * @param cache_size 2545 * Size of the per-core object cache. See rte_mempool_create() for 2546 * details. 2547 * @param nb_elem 2548 * The number of elements that a single event vector should be able to hold. 2549 * @param socket_id 2550 * The socket identifier where the memory should be allocated. The 2551 * value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the 2552 * reserved zone 2553 * 2554 * @return 2555 * The pointer to the newly allocated mempool, on success. NULL on error 2556 * with rte_errno set appropriately. Possible rte_errno values include: 2557 * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure 2558 * - E_RTE_SECONDARY - function was called from a secondary process instance 2559 * - EINVAL - cache size provided is too large, or priv_size is not aligned. 2560 * - ENOSPC - the maximum number of memzones has already been allocated 2561 * - EEXIST - a memzone with the same name already exists 2562 * - ENOMEM - no appropriate memory area found in which to create memzone 2563 * - ENAMETOOLONG - mempool name requested is too long. 2564 */ 2565 struct rte_mempool * 2566 rte_event_vector_pool_create(const char *name, unsigned int n, 2567 unsigned int cache_size, uint16_t nb_elem, 2568 int socket_id); 2569 2570 #include <rte_eventdev_core.h> 2571 2572 #ifdef __cplusplus 2573 extern "C" { 2574 #endif 2575 2576 static __rte_always_inline uint16_t 2577 __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, 2578 const struct rte_event ev[], uint16_t nb_events, 2579 const event_enqueue_burst_t fn) 2580 { 2581 const struct rte_event_fp_ops *fp_ops; 2582 void *port; 2583 2584 fp_ops = &rte_event_fp_ops[dev_id]; 2585 port = fp_ops->data[port_id]; 2586 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG 2587 if (dev_id >= RTE_EVENT_MAX_DEVS || 2588 port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) { 2589 rte_errno = EINVAL; 2590 return 0; 2591 } 2592 2593 if (port == NULL) { 2594 rte_errno = EINVAL; 2595 return 0; 2596 } 2597 #endif 2598 rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn); 2599 2600 return fn(port, ev, nb_events); 2601 } 2602 2603 /** 2604 * Enqueue a burst of events objects or an event object supplied in *rte_event* 2605 * structure on an event device designated by its *dev_id* through the event 2606 * port specified by *port_id*. Each event object specifies the event queue on 2607 * which it will be enqueued. 2608 * 2609 * The *nb_events* parameter is the number of event objects to enqueue which are 2610 * supplied in the *ev* array of *rte_event* structure. 2611 * 2612 * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be 2613 * enqueued to the same port that their associated events were dequeued from. 2614 * 2615 * The rte_event_enqueue_burst() function returns the number of 2616 * events objects it actually enqueued. A return value equal to *nb_events* 2617 * means that all event objects have been enqueued. 
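 *
 * For example, a minimal retry sketch that keeps enqueuing until all events
 * are accepted or a non-recoverable error occurs (*ev* and *nb* are assumed
 * to be prepared by the application; rte_errno comes from rte_errno.h):
 *
 *      uint16_t sent = 0;
 *      while (sent < nb) {
 *          uint16_t n = rte_event_enqueue_burst(dev_id, port_id,
 *                                               ev + sent, nb - sent);
 *          sent += n;
 *          if (n == 0 && rte_errno != ENOSPC)
 *              break;  // e.g. EINVAL; retrying will not help
 *      }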
2618 *
2619 * @param dev_id
2620 * The identifier of the device.
2621 * @param port_id
2622 * The identifier of the event port.
2623 * @param ev
2624 * Points to an array of *nb_events* objects of type *rte_event* structure
2625 * which contain the event object enqueue operations to be processed.
2626 * @param nb_events
2627 * The number of event objects to enqueue, typically number of
2628 * rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2629 * available for this port.
2630 *
2631 * @return
2632 * The number of event objects actually enqueued on the event device. The
2633 * return value can be less than the value of the *nb_events* parameter when
2634 * the event device's queue is full or if invalid parameters are specified in a
2635 * *rte_event*. If the return value is less than *nb_events*, the remaining
2636 * events at the end of ev[] are not consumed and the caller has to take care
2637 * of them, and rte_errno is set accordingly. Possible errno values include:
2638 * - EINVAL The port ID is invalid, device ID is invalid, an event's queue
2639 * ID is invalid, or an event's sched type doesn't match the
2640 * capabilities of the destination queue.
2641 * - ENOSPC The event port was backpressured and unable to enqueue
2642 * one or more events. This error code is only applicable to
2643 * closed systems.
2644 * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2645 */
2646 static inline uint16_t
2647 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2648 const struct rte_event ev[], uint16_t nb_events)
2649 {
2650 const struct rte_event_fp_ops *fp_ops;
2651
2652 fp_ops = &rte_event_fp_ops[dev_id];
2653 return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2654 fp_ops->enqueue_burst);
2655 }
2656
2657 /**
2658 * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
2659 * an event device designated by its *dev_id* through the event port specified
2660 * by *port_id*.
2661 *
2662 * Provides the same functionality as rte_event_enqueue_burst(), except that
2663 * the application can use this API when all the objects in the burst contain
2664 * the enqueue operation of type *RTE_EVENT_OP_NEW*. This specialized
2665 * function provides an additional hint to the PMD, which may optimize if possible.
2666 *
2667 * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
2668 * has an event object of operation type != RTE_EVENT_OP_NEW.
2669 *
2670 * @param dev_id
2671 * The identifier of the device.
2672 * @param port_id
2673 * The identifier of the event port.
2674 * @param ev
2675 * Points to an array of *nb_events* objects of type *rte_event* structure
2676 * which contain the event object enqueue operations to be processed.
2677 * @param nb_events
2678 * The number of event objects to enqueue, typically number of
2679 * rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2680 * available for this port.
2681 *
2682 * @return
2683 * The number of event objects actually enqueued on the event device. The
2684 * return value can be less than the value of the *nb_events* parameter when
2685 * the event device's queue is full or if invalid parameters are specified in a
2686 * *rte_event*. If the return value is less than *nb_events*, the remaining
2687 * events at the end of ev[] are not consumed and the caller has to take care
2688 * of them, and rte_errno is set accordingly.
Possible errno values include:
2689 * - EINVAL The port ID is invalid, device ID is invalid, an event's queue
2690 * ID is invalid, or an event's sched type doesn't match the
2691 * capabilities of the destination queue.
2692 * - ENOSPC The event port was backpressured and unable to enqueue
2693 * one or more events. This error code is only applicable to
2694 * closed systems.
2695 * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2696 * @see rte_event_enqueue_burst()
2697 */
2698 static inline uint16_t
2699 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
2700 const struct rte_event ev[], uint16_t nb_events)
2701 {
2702 const struct rte_event_fp_ops *fp_ops;
2703
2704 fp_ops = &rte_event_fp_ops[dev_id];
2705 return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2706 fp_ops->enqueue_new_burst);
2707 }
2708
2709 /**
2710 * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
2711 * on an event device designated by its *dev_id* through the event port
2712 * specified by *port_id*.
2713 *
2714 * Provides the same functionality as rte_event_enqueue_burst(), except that
2715 * the application can use this API when all the objects in the burst contain
2716 * the enqueue operation of type *RTE_EVENT_OP_FORWARD*. This specialized
2717 * function provides an additional hint to the PMD, which may optimize if possible.
2718 *
2719 * The rte_event_enqueue_forward_burst() result is undefined if the enqueue burst
2720 * has an event object of operation type != RTE_EVENT_OP_FORWARD.
2721 *
2722 * @param dev_id
2723 * The identifier of the device.
2724 * @param port_id
2725 * The identifier of the event port.
2726 * @param ev
2727 * Points to an array of *nb_events* objects of type *rte_event* structure
2728 * which contain the event object enqueue operations to be processed.
2729 * @param nb_events
2730 * The number of event objects to enqueue, typically number of
2731 * rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2732 * available for this port.
2733 *
2734 * @return
2735 * The number of event objects actually enqueued on the event device. The
2736 * return value can be less than the value of the *nb_events* parameter when
2737 * the event device's queue is full or if invalid parameters are specified in a
2738 * *rte_event*. If the return value is less than *nb_events*, the remaining
2739 * events at the end of ev[] are not consumed and the caller has to take care
2740 * of them, and rte_errno is set accordingly. Possible errno values include:
2741 * - EINVAL The port ID is invalid, device ID is invalid, an event's queue
2742 * ID is invalid, or an event's sched type doesn't match the
2743 * capabilities of the destination queue.
2744 * - ENOSPC The event port was backpressured and unable to enqueue
2745 * one or more events. This error code is only applicable to
2746 * closed systems.

/**
 * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_FORWARD*
 * on an event device designated by its *dev_id* through the event port
 * specified by *port_id*.
 *
 * Provides the same functionality as rte_event_enqueue_burst(), except that
 * the application can use this API when all objects in the burst contain
 * an enqueue operation of type *RTE_EVENT_OP_FORWARD*. This specialized
 * function provides an additional hint to the PMD, allowing it to optimize
 * if possible.
 *
 * The rte_event_enqueue_forward_burst() result is undefined if the enqueue
 * burst has an event object of operation type != RTE_EVENT_OP_FORWARD.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param ev
 *   Points to an array of *nb_events* objects of type *rte_event* structure
 *   which contain the event object enqueue operations to be processed.
 * @param nb_events
 *   The number of event objects to enqueue, typically the enqueue depth
 *   reported by rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
 *   for this port.
 *
 * @return
 *   The number of event objects actually enqueued on the event device. The
 *   return value can be less than the value of the *nb_events* parameter when
 *   the event device's queue is full or if invalid parameters are specified in
 *   an *rte_event*. If the return value is less than *nb_events*, the remaining
 *   events at the end of ev[] are not consumed and the caller has to take care
 *   of them, and rte_errno is set accordingly. Possible errno values include:
 *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
 *              ID is invalid, or an event's sched type doesn't match the
 *              capabilities of the destination queue.
 *   - ENOSPC   The event port was backpressured and unable to enqueue
 *              one or more events. This error code is only applicable to
 *              closed systems.
 * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
 * @see rte_event_enqueue_burst()
 */
static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
				const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
					 fp_ops->enqueue_forward_burst);
}
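
/*
 * Usage sketch (illustrative only, not part of the eventdev API): a worker
 * stage that dequeues events, processes them, and forwards them to the next
 * pipeline queue on the same port they were dequeued from. NEXT_QUEUE_ID and
 * app_process_event() are hypothetical.
 *
 *	struct rte_event evs[16];
 *	uint16_t i, n, sent = 0;
 *
 *	n = rte_event_dequeue_burst(dev_id, port_id, evs, RTE_DIM(evs), 0);
 *	for (i = 0; i < n; i++) {
 *		app_process_event(&evs[i]);
 *		evs[i].op = RTE_EVENT_OP_FORWARD;
 *		evs[i].queue_id = NEXT_QUEUE_ID;
 *	}
 *	while (sent < n)
 *		sent += rte_event_enqueue_forward_burst(dev_id, port_id,
 *							evs + sent, n - sent);
 */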

/**
 * Dequeue a burst of event objects, or a single event object, from the event
 * port designated by its *event_port_id*, on an event device designated
 * by its *dev_id*.
 *
 * rte_event_dequeue_burst() does not dictate the specifics of the scheduling
 * algorithm, as each eventdev driver may have different criteria for
 * scheduling an event. However, in general, from an application perspective
 * the scheduler may use the following scheme to dispatch an event to the port.
 *
 * 1) Selection of event queue based on
 *   a) The list of event queues linked to the event port.
 *   b) If the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability, then
 *      event queue selection from the list is based on the event queue
 *      priority, relative to the other event queues, supplied as *priority*
 *      in rte_event_queue_setup().
 *   c) If the device has the RTE_EVENT_DEV_CAP_EVENT_QOS capability, then
 *      event queue selection from the list is based on the event priority
 *      supplied as *priority* in rte_event_enqueue_burst().
 * 2) Selection of event based on
 *   a) The number of flows available in the selected event queue.
 *   b) The schedule type associated with the event.
 *
 * The *nb_events* parameter is the maximum number of event objects to dequeue,
 * which are returned in the *ev* array of *rte_event* structures.
 *
 * The rte_event_dequeue_burst() function returns the number of event objects
 * it actually dequeued. A return value equal to *nb_events* means that all
 * event objects have been dequeued.
 *
 * The number of events dequeued is the number of scheduler contexts held by
 * this port. These contexts are automatically released in the next
 * rte_event_dequeue_burst() invocation if the port supports implicit
 * releases; alternatively, the contexts can be released early by invoking
 * rte_event_enqueue_burst() with the RTE_EVENT_OP_RELEASE operation.
 *
 * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
 * enqueued to the same port that their associated events were dequeued from.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param[out] ev
 *   Points to an array of *nb_events* objects of type *rte_event* structure
 *   for output to be populated with the dequeued event objects.
 * @param nb_events
 *   The maximum number of event objects to dequeue, typically the dequeue
 *   depth returned by rte_event_port_dequeue_depth() for this port.
 *
 * @param timeout_ticks
 *   - 0 no-wait, returns immediately if there is no event.
 *   - >0 wait for the event. If the device is configured with
 *     RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT, then this function will wait
 *     until at least one event is available or *timeout_ticks* time has
 *     elapsed. If the device is not configured with
 *     RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT, then this function will wait
 *     until an event is available or the *dequeue_timeout_ns* previously
 *     supplied to rte_event_dev_configure() has elapsed.
 *
 * @return
 *   The number of event objects actually dequeued from the port. The return
 *   value can be less than the value of the *nb_events* parameter when the
 *   event port's queue is not full.
 *
 * @see rte_event_port_dequeue_depth()
 */
static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
			uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);

	return (fp_ops->dequeue_burst)(port, ev, nb_events, timeout_ticks);
}
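
/*
 * Usage sketch (illustrative only, not part of the eventdev API): convert a
 * nanosecond wait budget into device-specific ticks with
 * rte_event_dequeue_timeout_ticks() and use it for a blocking dequeue. The
 * per-call timeout is only honoured when the device was configured with
 * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT; dev_id and port_id are assumed to
 * refer to a started device.
 *
 *	struct rte_event evs[16];
 *	uint64_t ticks = 0;
 *	uint16_t n;
 *
 *	// Wait up to ~100 us per dequeue; fall back to no-wait on failure.
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) != 0)
 *		ticks = 0;
 *	n = rte_event_dequeue_burst(dev_id, port_id, evs, RTE_DIM(evs), ticks);
 */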

#define RTE_EVENT_DEV_MAINT_OP_FLUSH (1 << 0)
/**< Force an immediate flush of any buffered events in the port,
 * potentially at the cost of additional overhead.
 *
 * @see rte_event_maintain()
 */

/**
 * Maintain an event device.
 *
 * This function is only relevant for event devices which do not have
 * the @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE flag set. Such devices
 * require an application thread using a particular port to
 * periodically call rte_event_maintain() on that port during periods
 * in which it is neither attempting to enqueue events to nor dequeue
 * events from the port. rte_event_maintain() is a low-overhead
 * function and should be called at a high rate (e.g., in the
 * application's poll loop).
 *
 * No port may be left unmaintained.
 *
 * At the application thread's convenience, rte_event_maintain() may
 * (but is not required to) be called even during periods when enqueue
 * or dequeue functions are being called, at the cost of a slight
 * increase in overhead.
 *
 * rte_event_maintain() may be called on event devices which have set
 * @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, in which case it is a
 * no-operation.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param op
 *   0, or @ref RTE_EVENT_DEV_MAINT_OP_FLUSH.
 * @return
 *  - 0 on success.
 *  - -EINVAL if *dev_id*, *port_id*, or *op* is invalid.
 *
 * @see RTE_EVENT_DEV_CAP_MAINTENANCE_FREE
 */
static inline int
rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;

	if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
		return -EINVAL;
#endif
	rte_eventdev_trace_maintain(dev_id, port_id, op);

	if (fp_ops->maintain != NULL)
		fp_ops->maintain(port, op);

	return 0;
}
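
/*
 * Usage sketch (illustrative only, not part of the eventdev API): keep a port
 * maintained during idle iterations of a worker loop when the device is not
 * maintenance-free. app_keep_running() and app_has_work_for() are hypothetical
 * application predicates.
 *
 *	struct rte_event_dev_info info;
 *	bool needs_maintenance;
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	needs_maintenance =
 *		!(info.event_dev_cap & RTE_EVENT_DEV_CAP_MAINTENANCE_FREE);
 *
 *	while (app_keep_running()) {
 *		if (app_has_work_for(port_id)) {
 *			// ... enqueue to / dequeue from the port ...
 *		} else if (needs_maintenance) {
 *			rte_event_maintain(dev_id, port_id, 0);
 *		}
 *	}
 */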

/**
 * Change the active profile on an event port.
 *
 * This function is used to change the current active profile on an event port
 * when multiple link profiles are configured on an event port through the
 * function call ``rte_event_port_profile_links_set``.
 *
 * On the subsequent ``rte_event_dequeue_burst`` call, only the event queues
 * that were associated with the newly active profile will participate in
 * scheduling.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param profile_id
 *   The identifier of the profile.
 * @return
 *  - 0 on success.
 *  - -EINVAL if *dev_id*, *port_id*, or *profile_id* is invalid.
 */
static inline uint8_t
rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;

	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
		return -EINVAL;
#endif
	rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);

	return fp_ops->profile_switch(port, profile_id);
}

/**
 * Modify the pre-schedule type to use on an event port.
 *
 * This function is used to change the current pre-schedule type configured
 * on an event port; the pre-schedule type can be set to none to disable
 * pre-scheduling. This affects the subsequent ``rte_event_dequeue_burst`` call.
 * The event device should support the RTE_EVENT_DEV_CAP_PER_PORT_PRESCHEDULE
 * capability.
 *
 * To avoid fastpath capability checks, if an event device does not support the
 * RTE_EVENT_DEV_CAP_PER_PORT_PRESCHEDULE capability, then this function will
 * return -ENOTSUP.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param type
 *   The pre-schedule type to use on the event port.
 * @return
 *  - 0 on success.
 *  - -EINVAL if *dev_id*, *port_id*, or *type* is invalid.
 *  - -ENOTSUP if the device does not support the per-port pre-schedule
 *    capability.
 */
__rte_experimental
static inline int
rte_event_port_preschedule_modify(uint8_t dev_id, uint8_t port_id,
				  enum rte_event_dev_preschedule_type type)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS || port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;
#endif
	rte_eventdev_trace_port_preschedule_modify(dev_id, port_id, type);

	return fp_ops->preschedule_modify(port, type);
}

/**
 * Provide a hint to the event device to pre-schedule events to the event port.
 *
 * Hint the event device to pre-schedule events to the event port.
 * The call does not guarantee that the events will be pre-scheduled.
 * The call does not release the flow context currently held by the event port.
 * The event device should support the RTE_EVENT_DEV_CAP_PRESCHEDULE_EXPLICIT
 * capability.
 *
 * When pre-scheduling is enabled at an event device/port level, or if
 * the capability is not supported, then the hint is ignored.
 *
 * Subsequent calls to rte_event_dequeue_burst() will dequeue the pre-scheduled
 * events, but the pre-schedule operation is not issued again.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param type
 *   The pre-schedule type to use on the event port.
 */
__rte_experimental
static inline void
rte_event_port_preschedule(uint8_t dev_id, uint8_t port_id,
			   enum rte_event_dev_preschedule_type type)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS || port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return;
	if (port == NULL)
		return;
#endif
	rte_eventdev_trace_port_preschedule(dev_id, port_id, type);

	fp_ops->preschedule(port, type);
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENTDEV_H_ */