1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc.
3  * Copyright(c) 2016-2018 Intel Corporation.
4  * Copyright 2016 NXP
5  * All rights reserved.
6  */
7 
8 #ifndef _RTE_EVENTDEV_H_
9 #define _RTE_EVENTDEV_H_
10 
11 /**
12  * @file
13  *
14  * RTE Event Device API
15  * ====================
16  *
17  * In a traditional DPDK application model, the application polls Ethdev port RX
18  * queues to look for work, and processing is done in a run-to-completion manner,
19  * after which the packets are transmitted on an Ethdev TX queue. Load is
20  * distributed by statically assigning ports and queues to lcores, and NIC
21  * receive-side scaling (RSS), or similar, is employed to distribute network flows
22  * (and thus work) on the same port across multiple RX queues.
23  *
24  * In contrast, in an event-driven model, as supported by this "eventdev" library,
25  * incoming packets (or other input events) are fed into an event device, which
26  * schedules those packets across the available lcores, in accordance with its configuration.
27  * This event-driven programming model offers applications automatic multicore scaling,
28  * dynamic load balancing, pipelining, packet order maintenance, synchronization,
29  * and prioritization/quality of service.
30  *
31  * The Event Device API is composed of two parts:
32  *
33  * - The application-oriented Event API that includes functions to set up
34  *   an event device (configure it, set up its queues and ports, and start it), to
35  *   establish the links between queues and ports to receive events, and so on.
36  *
37  * - The driver-oriented Event API that exports a function allowing
38  *   an event Poll Mode Driver (PMD) to register itself as
39  *   an event device driver.
40  *
41  * Application-oriented Event API
42  * ------------------------------
43  *
44  * Event device components:
45  *
46  *                     +-----------------+
47  *                     | +-------------+ |
48  *        +-------+    | |    flow 0   | |
49  *        |Packet |    | +-------------+ |
50  *        |event  |    | +-------------+ |
51  *        |       |    | |    flow 1   | |port_link(port0, queue0)
52  *        +-------+    | +-------------+ |     |     +--------+
53  *        +-------+    | +-------------+ o-----v-----o        |dequeue +------+
54  *        |Crypto |    | |    flow n   | |           | event  +------->|Core 0|
55  *        |work   |    | +-------------+ o----+      | port 0 |        |      |
56  *        |done ev|    |  event queue 0  |    |      +--------+        +------+
57  *        +-------+    +-----------------+    |
58  *        +-------+                           |
59  *        |Timer  |    +-----------------+    |      +--------+
60  *        |expiry |    | +-------------+ |    +------o        |dequeue +------+
61  *        |event  |    | |    flow 0   | o-----------o event  +------->|Core 1|
62  *        +-------+    | +-------------+ |      +----o port 1 |        |      |
63  *       Event enqueue | +-------------+ |      |    +--------+        +------+
64  *     o-------------> | |    flow 1   | |      |
65  *        enqueue(     | +-------------+ |      |
66  *        queue_id,    |                 |      |    +--------+        +------+
67  *        flow_id,     | +-------------+ |      |    |        |dequeue |Core 2|
68  *        sched_type,  | |    flow n   | o-----------o event  +------->|      |
69  *        event_type,  | +-------------+ |      |    | port 2 |        +------+
70  *        subev_type,  |  event queue 1  |      |    +--------+
71  *        event)       +-----------------+      |    +--------+
72  *                                              |    |        |dequeue +------+
73  *        +-------+    +-----------------+      |    | event  +------->|Core n|
74  *        |Core   |    | +-------------+ o-----------o port n |        |      |
75  *        |(SW)   |    | |    flow 0   | |      |    +--------+        +--+---+
76  *        |event  |    | +-------------+ |      |                         |
77  *        +-------+    | +-------------+ |      |                         |
78  *            ^        | |    flow 1   | |      |                         |
79  *            |        | +-------------+ o------+                         |
80  *            |        | +-------------+ |                                |
81  *            |        | |    flow n   | |                                |
82  *            |        | +-------------+ |                                |
83  *            |        |  event queue n  |                                |
84  *            |        +-----------------+                                |
85  *            |                                                           |
86  *            +-----------------------------------------------------------+
87  *
88  * **Event device**: A hardware or software-based event scheduler.
89  *
90  * **Event**: Represents an item of work and is the smallest unit of scheduling.
91  * An event carries metadata, such as queue ID, scheduling type, and event priority,
92  * and data such as one or more packets or other kinds of buffers.
93  * Some examples of events are:
94  * - a software-generated item of work originating from an lcore,
95  *   perhaps carrying a packet to be processed.
96  * - a crypto work completion notification.
97  * - a timer expiry notification.
98  *
99  * **Event queue**: A queue containing events that are to be scheduled by the event device.
100  * An event queue contains events of different flows associated with scheduling
101  * types, such as atomic, ordered, or parallel.
102  * Each event given to an event device must have a valid event queue id field in the metadata,
103  * to specify on which event queue in the device the event must be placed,
104  * for later scheduling.
105  *
106  * **Event port**: An application's interface into the event dev for enqueue and
107  * dequeue operations. Each event port can be linked with one or more
108  * event queues for dequeue operations.
109  * Enqueue and dequeue from a port are not thread-safe, and the expected use-case is
110  * that each port is polled by only a single lcore. [If this is not the case,
111  * a suitable synchronization mechanism should be used to prevent simultaneous
112  * access from multiple lcores.]
113  * To schedule events to an lcore, the event device will schedule them to the event port(s)
114  * being polled by that lcore.
115  *
116  * *NOTE*: By default, all the functions of the Event Device API exported by a PMD
117  * are non-thread-safe functions, which must not be invoked on the same object in parallel on
118  * different logical cores.
119  * For instance, the dequeue function of a PMD cannot be invoked in parallel on two logical
120  * cores to operate on the same event port. Of course, this function
121  * can be invoked in parallel by different logical cores on different ports.
122  * It is the responsibility of the upper level application to enforce this rule.
123  *
124  * In all functions of the Event API, the Event device is
125  * designated by an integer >= 0 named the device identifier *dev_id*.
126  *
127  * The functions exported by the application Event API to setup a device
128  * must be invoked in the following order:
129  *     - rte_event_dev_configure()
130  *     - rte_event_queue_setup()
131  *     - rte_event_port_setup()
132  *     - rte_event_port_link()
133  *     - rte_event_dev_start()
134  *
135  * Then, the application can invoke, in any order, the functions
136  * exported by the Event API to dequeue events, enqueue events,
137  * and link and unlink event queue(s) to event ports.
138  *
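 * As an illustration, the bring-up sequence for a minimal single-queue,
 * single-port device might look as follows (a sketch only; error handling is
 * omitted, and the "config" values are assumed to have been chosen within the
 * limits reported by rte_event_dev_info_get()):
 *
 * \code{.c}
 *	struct rte_event_dev_config config = {0};
 *
 *	// ... fill config within the limits from rte_event_dev_info_get() ...
 *	rte_event_dev_configure(dev_id, &config);
 *	rte_event_queue_setup(dev_id, 0, NULL); // NULL: use default queue config
 *	rte_event_port_setup(dev_id, 0, NULL);  // NULL: use default port config
 *	rte_event_port_link(dev_id, 0, NULL, NULL, 0); // link port 0 to all queues
 *	rte_event_dev_start(dev_id);
 * \endcode
 *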
139  * Before configuring a device, an application should call rte_event_dev_info_get()
140  * to determine the capabilities of the event device, and any queue or port
141  * limits of that device. The parameters set in the various device configuration
142  * structures may need to be adjusted based on the max values provided in the
143  * device information structure returned from the rte_event_dev_info_get() API.
144  * An application may use rte_event_queue_default_conf_get() or
145  * rte_event_port_default_conf_get() to get the default configuration
146  * to set up an event queue or event port by overriding a few default values.
147  *
148  * If the application wants to change the configuration (i.e. call
149  * rte_event_dev_configure(), rte_event_queue_setup(), or
150  * rte_event_port_setup()), it must call rte_event_dev_stop() first to stop the
151  * device and then do the reconfiguration before calling rte_event_dev_start()
152  * again. The schedule, enqueue and dequeue functions should not be invoked
153  * when the device is stopped.
154  *
155  * Finally, an application can close an Event device by invoking the
156  * rte_event_dev_close() function. Once closed, a device cannot be
157  * reconfigured or restarted.
158  *
159  * Driver-Oriented Event API
160  * -------------------------
161  *
162  * At the Event driver level, Event devices are represented by a generic
163  * data structure of type *rte_event_dev*.
164  *
165  * Event devices are dynamically registered during the PCI/SoC device probing
166  * phase performed at EAL initialization time.
167  * When an Event device is being probed, an *rte_event_dev* structure is allocated
168  * for it and the event_dev_init() function supplied by the Event driver
169  * is invoked to properly initialize the device.
170  *
171  * The role of the device init function is to reset the device hardware or
172  * to initialize the software event driver implementation.
173  *
174  * If the device init operation is successful, the device is assigned a device
175  * id (dev_id) for application use.
176  * Otherwise, the *rte_event_dev* structure is freed.
177  *
178  * Each function of the application Event API invokes a specific function
179  * of the PMD that controls the target device designated by its device
180  * identifier.
181  *
182  * For this purpose, all device-specific functions of an Event driver are
183  * supplied through a set of pointers contained in a generic structure of type
184  * *event_dev_ops*.
185  * The address of the *event_dev_ops* structure is stored in the *rte_event_dev*
186  * structure by the device init function of the Event driver, which is
187  * invoked during the PCI/SoC device probing phase, as explained earlier.
188  *
189  * In other words, each function of the Event API simply retrieves the
190  * *rte_event_dev* structure associated with the device identifier and
191  * performs an indirect invocation of the corresponding driver function
192  * supplied in the *event_dev_ops* structure of the *rte_event_dev* structure.
193  *
194  * For performance reasons, the addresses of the fast-path functions of the
195  * event driver are not contained in the *event_dev_ops* structure.
196  * Instead, they are directly stored at the beginning of the *rte_event_dev*
197  * structure to avoid an extra indirect memory access during their invocation.
198  *
199  * Event Enqueue, Dequeue and Scheduling
200  * -------------------------------------
201  *
202  * RTE event device drivers do not use interrupts for enqueue or dequeue
203  * operations. Instead, Event drivers export Poll-Mode enqueue and dequeue
204  * functions to applications.
205  *
206  * Events are injected into the event device through the *enqueue* operation by
207  * event producers in the system. Typical event producers are the ethdev
208  * subsystem for generating packet events, the CPU (SW) for generating events based
209  * on different stages of application processing, and cryptodev for generating
210  * crypto work completion notifications, etc.
211  *
212  * The *dequeue* operation gets one or more events from the event ports.
213  * The application processes the events and sends them to a downstream event queue through
214  * rte_event_enqueue_burst(), if it is an intermediate stage of event processing.
215  * At the final stage of processing, the application may use the Tx adapter API for maintaining
216  * the event ingress order while sending the packet/event on the wire via NIC Tx.
217  *
218  * The point at which events are scheduled to ports depends on the device.
219  * For hardware devices, scheduling occurs asynchronously without any software
220  * intervention. Software schedulers can either be distributed
221  * (each worker thread schedules events to its own port) or centralized
222  * (a dedicated thread schedules to all ports). Distributed software schedulers
223  * perform the scheduling inside the enqueue or dequeue functions, whereas centralized
224  * software schedulers need a dedicated service core for scheduling.
225  * The absence of the RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag
226  * indicates that the device is centralized and thus needs a dedicated scheduling
227  * thread (generally an RTE service that should be mapped to one or more service cores)
228  * that repeatedly calls the software-specific scheduling function.
229  *
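 * For a centralized eventdev, one possible way to run the scheduling service is
 * sketched below (illustrative only; SCHED_LCORE_ID is an assumed,
 * application-chosen service lcore id, and error handling is omitted):
 *
 * \code{.c}
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		// The device needs a service core to run its scheduler.
 *		rte_service_lcore_add(SCHED_LCORE_ID);
 *		rte_service_map_lcore_set(service_id, SCHED_LCORE_ID, 1);
 *		rte_service_runstate_set(service_id, 1);
 *		rte_service_lcore_start(SCHED_LCORE_ID);
 *	}
 * \endcode
 *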
230  * An event-driven worker thread has the following typical workflow on the fast path:
231  * \code{.c}
232  *	while (1) {
233  *		rte_event_dequeue_burst(...);
234  *		(event processing)
235  *		rte_event_enqueue_burst(...);
236  *	}
237  * \endcode
238  */
239 
240 #ifdef __cplusplus
241 extern "C" {
242 #endif
243 
244 #include <rte_compat.h>
245 #include <rte_common.h>
246 #include <rte_errno.h>
247 #include <rte_mbuf_pool_ops.h>
248 #include <rte_mempool.h>
249 
250 #include "rte_eventdev_trace_fp.h"
251 
252 struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
253 struct rte_event;
254 
255 /* Event device capability bitmap flags */
256 #define RTE_EVENT_DEV_CAP_QUEUE_QOS           (1ULL << 0)
257 /**< Event scheduling prioritization is based on the priority and weight
258  * associated with each event queue.
259  *
260  * Events from the queue with the highest priority
261  * are scheduled first. If queues are of the same priority, the weights of the queues
262  * are considered to select a queue in a weighted round-robin fashion.
263  * Subsequent dequeue calls from an event port could see events from the same
264  * event queue, if the queue is configured with an affinity count. The affinity
265  * count is the number of subsequent dequeue calls in which an event port
266  * should use the same event queue, if the queue is non-empty.
267  *
268  * NOTE: A device may use both queue prioritization and event prioritization
269  * (@ref RTE_EVENT_DEV_CAP_EVENT_QOS capability) when making packet scheduling decisions.
270  *
271  *  @see rte_event_queue_setup()
272  *  @see rte_event_queue_attr_set()
273  */
274 #define RTE_EVENT_DEV_CAP_EVENT_QOS           (1ULL << 1)
275 /**< Event scheduling prioritization is based on the priority associated with
276  *  each event.
277  *
278  *  Priority of each event is supplied in *rte_event* structure
279  *  on each enqueue operation.
280  *  If this capability is not set, the priority field of the event structure
281  *  is ignored for each event.
282  *
283  * NOTE: A device may use both queue prioritization (@ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability)
284  * and event prioritization when making packet scheduling decisions.
285  *
286  *  @see rte_event_enqueue_burst()
287  */
288 #define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED   (1ULL << 2)
289 /**< Event device operates in distributed scheduling mode.
290  *
291  * In distributed scheduling mode, event scheduling happens in HW, in
292  * rte_event_dequeue_burst() / rte_event_enqueue_burst(), or in a combination of the two.
293  * If the flag is not set, then the eventdev is centralized and thus needs a
294  * dedicated service core that acts as a scheduling thread.
295  *
296  * @see rte_event_dev_service_id_get()
297  */
298 #define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES     (1ULL << 3)
299 /**< Event device is capable of accepting enqueued events, of any type
300  * advertised as supported by the device, to all destination queues.
301  *
302  * When this capability is set, and @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag is set
303  * in @ref rte_event_queue_conf.event_queue_cfg, the "schedule_type" field of the
304  * @ref rte_event_queue_conf structure is ignored when a queue is being configured.
305  * Instead, the "sched_type" field of each enqueued event is used to
306  * select the scheduling to be performed on that event.
307  *
308  * If this capability is not set, or the configuration flag is not set,
309  * the queue only supports events of the *RTE_SCHED_TYPE_* type specified
310  * in the @ref rte_event_queue_conf structure at the time of configuration.
311  * The behaviour when events of other scheduling types are sent to the queue is
312  * undefined.
313  *
314  * @see RTE_EVENT_QUEUE_CFG_ALL_TYPES
315  * @see RTE_SCHED_TYPE_ATOMIC
316  * @see RTE_SCHED_TYPE_ORDERED
317  * @see RTE_SCHED_TYPE_PARALLEL
318  * @see rte_event_queue_conf.event_queue_cfg
319  * @see rte_event_queue_conf.schedule_type
320  * @see rte_event_enqueue_burst()
321  */
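/*
 * For example, a queue accepting all schedule types might be set up as below
 * (a sketch; assumes the device reports RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES and
 * that dev_id/queue_id are valid):
 *
 * \code{.c}
 *	struct rte_event_queue_conf conf;
 *
 *	rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
 *	conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
 *	rte_event_queue_setup(dev_id, queue_id, &conf);
 *
 *	// Each event then selects its own scheduling behaviour:
 *	ev.sched_type = RTE_SCHED_TYPE_ATOMIC; // or ORDERED/PARALLEL, per event
 * \endcode
 */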
322 #define RTE_EVENT_DEV_CAP_BURST_MODE          (1ULL << 4)
323 /**< Event device is capable of operating in burst mode for enqueue (forward,
324  * release) and dequeue operations.
325  *
326  * If this capability is not set, the application
327  * can still use rte_event_dequeue_burst() and rte_event_enqueue_burst(), but the
328  * PMD accepts or returns only one event at a time.
329  *
330  * @see rte_event_dequeue_burst()
331  * @see rte_event_enqueue_burst()
332  */
333 #define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE    (1ULL << 5)
334 /**< Event device ports support disabling the implicit release feature, in
335  * which the port will release all unreleased events in its dequeue operation.
336  *
337  * If this capability is set and the port is configured with implicit release
338  * disabled, the application is responsible for explicitly releasing events
339  * using either the @ref RTE_EVENT_OP_FORWARD or the @ref RTE_EVENT_OP_RELEASE event
340  * enqueue operations.
341  *
342  * @see rte_event_dequeue_burst()
343  * @see rte_event_enqueue_burst()
344  */
345 
346 #define RTE_EVENT_DEV_CAP_NONSEQ_MODE         (1ULL << 6)
347 /**< Event device is capable of operating in non-sequential mode.
348  *
349  * The path of the event need not be sequential. The application can change
350  * the path of an event at runtime, and events may be sent to queues in any order.
351  *
352  * If the flag is not set, then each event will follow a path from queue 0
353  * to queue 1 to queue 2, etc.
354  * The eventdev will return an error when the application enqueues an event for a
355  * queue id which is not the next in the sequence.
356  */
357 
358 #define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK   (1ULL << 7)
359 /**< Event device is capable of reconfiguring the queue/port link at runtime.
360  *
361  * If the flag is not set, the eventdev queue/port link can only be
362  * configured during initialization, or by stopping the device and
363  * then restarting it after reconfiguration.
364  *
365  * @see rte_event_port_link()
366  * @see rte_event_port_unlink()
367  */
368 
369 #define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
370 /**< Event device is capable of setting up links between multiple queues and a single port.
371  *
372  * If the flag is not set, each port may only be linked to a single queue, and
373  * so can only receive events from that queue.
374  * However, each queue may be linked to multiple ports.
375  *
376  * @see rte_event_port_link()
377  */
378 
379 #define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
380 /**< Event device preserves the flow ID from the enqueued event to the dequeued event.
381  *
382  * If this flag is not set,
383  * the content of the flow-id field in dequeued events is implementation dependent.
384  *
385  * @see rte_event_dequeue_burst()
386  */
387 
388 #define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)
389 /**< Event device *does not* require calls to rte_event_maintain().
390  *
391  * An event device that does not set this flag requires calls to
392  * rte_event_maintain() during periods when neither
393  * rte_event_dequeue_burst() nor rte_event_enqueue_burst() are called
394  * on a port. This allows the event device to perform internal
395  * processing, such as flushing buffered events, returning credits to a
396  * global pool, or processing signaling related to load balancing.
397  *
398  * @see rte_event_maintain()
399  */
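/*
 * For devices without this capability, an otherwise-idle worker might keep its
 * port serviced roughly as below (a sketch; "evs" is an assumed local event
 * array):
 *
 * \code{.c}
 *	uint16_t n = rte_event_dequeue_burst(dev_id, port_id, evs, RTE_DIM(evs), 0);
 *	if (n == 0)
 *		rte_event_maintain(dev_id, port_id, 0); // no-op on maintenance-free devices
 * \endcode
 */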
400 
401 #define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11)
402 /**< Event device is capable of changing the queue attributes at runtime, i.e.
403  * after the rte_event_queue_setup() or rte_event_dev_start() call sequence.
404  *
405  * If this flag is not set, event queue attributes can only be configured during
406  * rte_event_queue_setup().
407  *
408  * @see rte_event_queue_setup()
409  */
410 
411 #define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12)
412 /**< Event device is capable of supporting multiple link profiles per event port.
413  *
414  * When set, the value of `rte_event_dev_info::max_profiles_per_port` is greater
415  * than one, and multiple profiles may be configured and then switched at runtime.
416  * If not set, only a single profile may be configured, which may itself be
417  * runtime adjustable (if @ref RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK is set).
418  *
419  * @see rte_event_port_profile_links_set()
420  * @see rte_event_port_profile_links_get()
421  * @see rte_event_port_profile_switch()
422  * @see RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK
423  */
424 
425 #define RTE_EVENT_DEV_CAP_ATOMIC  (1ULL << 13)
426 /**< Event device is capable of atomic scheduling.
427  * When this flag is set, the application can configure queues with scheduling type
428  * atomic on this event device.
429  *
430  * @see RTE_SCHED_TYPE_ATOMIC
431  */
432 
433 #define RTE_EVENT_DEV_CAP_ORDERED  (1ULL << 14)
434 /**< Event device is capable of ordered scheduling.
435  * When this flag is set, the application can configure queues with scheduling type
436  * ordered on this event device.
437  *
438  * @see RTE_SCHED_TYPE_ORDERED
439  */
440 
441 #define RTE_EVENT_DEV_CAP_PARALLEL  (1ULL << 15)
442 /**< Event device is capable of parallel scheduling.
443  * When this flag is set, the application can configure queues with scheduling type
444  * parallel on this event device.
445  *
446  * @see RTE_SCHED_TYPE_PARALLEL
447  */
448 
449 /* Event device priority levels */
450 #define RTE_EVENT_DEV_PRIORITY_HIGHEST   0
451 /**< Highest priority level for events and queues.
452  *
453  * @see rte_event_queue_setup()
454  * @see rte_event_enqueue_burst()
455  * @see rte_event_port_link()
456  */
457 #define RTE_EVENT_DEV_PRIORITY_NORMAL    128
458 /**< Normal priority level for events and queues.
459  *
460  * @see rte_event_queue_setup()
461  * @see rte_event_enqueue_burst()
462  * @see rte_event_port_link()
463  */
464 #define RTE_EVENT_DEV_PRIORITY_LOWEST    255
465 /**< Lowest priority level for events and queues.
466  *
467  * @see rte_event_queue_setup()
468  * @see rte_event_enqueue_burst()
469  * @see rte_event_port_link()
470  */
471 
472 /* Event queue scheduling weights */
473 #define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255
474 /**< Highest weight of an event queue.
475  *
476  * @see rte_event_queue_attr_get()
477  * @see rte_event_queue_attr_set()
478  */
479 #define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0
480 /**< Lowest weight of an event queue.
481  *
482  * @see rte_event_queue_attr_get()
483  * @see rte_event_queue_attr_set()
484  */
485 
486 /* Event queue scheduling affinity */
487 #define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255
488 /**< Highest scheduling affinity of an event queue.
489  *
490  * @see rte_event_queue_attr_get()
491  * @see rte_event_queue_attr_set()
492  */
493 #define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0
494 /**< Lowest scheduling affinity of an event queue.
495  *
496  * @see rte_event_queue_attr_get()
497  * @see rte_event_queue_attr_set()
498  */
499 
500 /**
501  * Get the total number of event devices.
502  *
503  * @return
504  *   The total number of usable event devices.
505  */
506 uint8_t
507 rte_event_dev_count(void);
508 
509 /**
510  * Get the device identifier for the named event device.
511  *
512  * @param name
513  *   Event device name to select the event device identifier.
514  *
515  * @return
516  *   Event device identifier (dev_id >= 0) on success.
517  *   Negative error code on failure:
518  *   - -EINVAL - input name parameter is invalid.
519  *   - -ENODEV - no event device found with that name.
520  */
521 int
522 rte_event_dev_get_dev_id(const char *name);
523 
524 /**
525  * Return the NUMA socket to which a device is connected.
526  *
527  * @param dev_id
528  *   The identifier of the device.
529  * @return
530  *   The NUMA socket id to which the device is connected or
531  *   a default of zero if the socket could not be determined.
532  *   -EINVAL on error, where the given dev_id value does not
533  *   correspond to any event device.
534  */
535 int
536 rte_event_dev_socket_id(uint8_t dev_id);
537 
538 /**
539  * Event device information
540  */
541 struct rte_event_dev_info {
542 	const char *driver_name;	/**< Event driver name. */
543 	struct rte_device *dev;	/**< Device information. */
544 	uint32_t min_dequeue_timeout_ns;
545 	/**< Minimum global dequeue timeout(ns) supported by this device. */
546 	uint32_t max_dequeue_timeout_ns;
547 	/**< Maximum global dequeue timeout(ns) supported by this device. */
548 	uint32_t dequeue_timeout_ns;
549 	/**< Configured global dequeue timeout(ns) for this device. */
550 	uint8_t max_event_queues;
551 	/**< Maximum event queues supported by this device.
552 	 *
553 	 * This count excludes any queues covered by @ref max_single_link_event_port_queue_pairs.
554 	 */
555 	uint32_t max_event_queue_flows;
556 	/**< Maximum number of flows within an event queue supported by this device. */
557 	uint8_t max_event_queue_priority_levels;
558 	/**< Maximum number of event queue priority levels supported by this device.
559 	 *
560 	 * Valid when the device has @ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
561 	 *
562 	 * The implementation shall normalize priority values specified between
563 	 * @ref RTE_EVENT_DEV_PRIORITY_HIGHEST and @ref RTE_EVENT_DEV_PRIORITY_LOWEST
564 	 * to map them internally to this range of priorities.
565 	 * [For devices supporting a power-of-2 number of priority levels, this
566 	 * normalization will be done via a right-shift operation, so only the top
567 	 * log2(max_levels) bits will be used by the event device.]
568 	 *
569 	 * @see rte_event_queue_conf.priority
570 	 */
571 	uint8_t max_event_priority_levels;
572 	/**< Maximum number of event priority levels supported by this device.
573 	 *
574 	 * Valid when the device has @ref RTE_EVENT_DEV_CAP_EVENT_QOS capability.
575 	 *
576 	 * The implementation shall normalize priority values specified between
577 	 * @ref RTE_EVENT_DEV_PRIORITY_HIGHEST and @ref RTE_EVENT_DEV_PRIORITY_LOWEST
578 	 * to map them internally to this range of priorities.
579 	 * [For devices supporting a power-of-2 number of priority levels, this
580 	 * normalization will be done via a right-shift operation, so only the top
581 	 * log2(max_levels) bits will be used by the event device.]
582 	 *
583 	 * @see rte_event.priority
584 	 */
585 	uint8_t max_event_ports;
586 	/**< Maximum number of event ports supported by this device.
587 	 *
588 	 * This count excludes any ports covered by @ref max_single_link_event_port_queue_pairs.
589 	 */
590 	uint8_t max_event_port_dequeue_depth;
591 	/**< Maximum number of events that can be dequeued at a time from an event port
592 	 * on this device.
593 	 *
594 	 * A device that does not support burst dequeue
595 	 * (@ref RTE_EVENT_DEV_CAP_BURST_MODE) will set this to 1.
596 	 */
597 	uint32_t max_event_port_enqueue_depth;
598 	/**< Maximum number of events that can be enqueued at a time to an event port
599 	 * on this device.
600 	 *
601 	 * A device that does not support burst enqueue
602 	 * (@ref RTE_EVENT_DEV_CAP_BURST_MODE) will set this to 1.
603 	 */
604 	uint8_t max_event_port_links;
605 	/**< Maximum number of queues that can be linked to a single event port on this device.
606 	 */
607 	int32_t max_num_events;
608 	/**< A *closed system* event dev has a limit on the number of events it
609 	 * can manage at a time.
610 	 * Once the number of events tracked by an eventdev exceeds this number,
611 	 * any enqueues of NEW events will fail.
612 	 * An *open system* event dev does not have a limit and will specify this as -1.
613 	 */
614 	uint32_t event_dev_cap;
615 	/**< Event device capabilities flags (RTE_EVENT_DEV_CAP_*). */
616 	uint8_t max_single_link_event_port_queue_pairs;
617 	/**< Maximum number of event ports and queues, supported by this device,
618 	 * that are optimized for (and only capable of) single-link configurations.
619 	 * These ports and queues are not accounted for in @ref max_event_ports
620 	 * or @ref max_event_queues.
621 	 */
622 	uint8_t max_profiles_per_port;
623 	/**< Maximum number of event queue link profiles per event port.
624 	 * A device that doesn't support multiple profiles will set this as 1.
625 	 */
626 };
627 
628 /**
629  * Retrieve details of an event device's capabilities and configuration limits.
630  *
631  * @param dev_id
632  *   The identifier of the device.
633  *
634  * @param[out] dev_info
635  *   A pointer to a structure of type *rte_event_dev_info* to be filled with the
636  *   information about the device's capabilities.
637  *
638  * @return
639  *   - 0: Success, information about the event device is present in dev_info.
640  *   - <0: Failure, error code returned by the function.
641  *     - -EINVAL - invalid input parameters, e.g. incorrect device id.
642  *     - -ENOTSUP - device does not support returning capabilities information.
643  */
644 int
645 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
646 
647 /**
648  * The count of ports.
649  */
650 #define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
651 /**
652  * The count of queues.
653  */
654 #define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
655 /**
656  * The status of the device, zero for stopped, non-zero for started.
657  */
658 #define RTE_EVENT_DEV_ATTR_STARTED 2
659 
660 /**
661  * Get an attribute from a device.
662  *
663  * @param dev_id Eventdev id
664  * @param attr_id The attribute ID to retrieve
665  * @param[out] attr_value A pointer that will be filled in with the attribute
666  *             value if successful.
667  *
668  * @return
669  *   - 0: Successfully retrieved attribute value
670  *   - -EINVAL: Invalid device or *attr_id* provided, or *attr_value* is NULL.
671  */
672 int
673 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
674 		       uint32_t *attr_value);
675 
676 
677 /* Event device configuration bitmap flags */
678 #define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
679 /**< Override the global *dequeue_timeout_ns* and use per dequeue timeout in ns.
680  *  @see rte_event_dequeue_timeout_ticks(), rte_event_dequeue_burst()
681  */
682 
683 /** Event device configuration structure */
684 struct rte_event_dev_config {
685 	uint32_t dequeue_timeout_ns;
686 	/**< rte_event_dequeue_burst() timeout on this device.
687 	 * This value should be in the range of @ref rte_event_dev_info.min_dequeue_timeout_ns and
688 	 * @ref rte_event_dev_info.max_dequeue_timeout_ns returned by
689 	 * @ref rte_event_dev_info_get().
690 	 * The value 0 is allowed, in which case the default dequeue timeout is used.
691 	 * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
692 	 */
693 	int32_t nb_events_limit;
694 	/**< In a *closed system* this field is the limit on maximum number of
695 	 * events that can be inflight in the eventdev at a given time. The
696 	 * limit is required to ensure that the finite space in a closed system
697 	 * is not exhausted.
698 	 * The value cannot exceed @ref rte_event_dev_info.max_num_events
699 	 * returned by rte_event_dev_info_get().
700 	 *
701 	 * This value should be set to -1 for *open systems*, that is,
702 	 * those systems returning -1 in @ref rte_event_dev_info.max_num_events.
703 	 *
704 	 * @see rte_event_port_conf.new_event_threshold
705 	 */
706 	uint8_t nb_event_queues;
707 	/**< Number of event queues to configure on this device.
708 	 * This value *includes* any single-link queue-port pairs to be used.
709 	 * This value cannot exceed @ref rte_event_dev_info.max_event_queues +
710 	 * @ref rte_event_dev_info.max_single_link_event_port_queue_pairs
711 	 * returned by rte_event_dev_info_get().
712 	 * The number of non-single-link queues, i.e. this value less
713 	 * *nb_single_link_event_port_queues* in this struct, cannot exceed
714 	 * @ref rte_event_dev_info.max_event_queues.
715 	 */
716 	uint8_t nb_event_ports;
717 	/**< Number of event ports to configure on this device.
718 	 * This value *includes* any single-link queue-port pairs to be used.
719 	 * This value cannot exceed @ref rte_event_dev_info.max_event_ports +
720 	 * @ref rte_event_dev_info.max_single_link_event_port_queue_pairs
721 	 * returned by rte_event_dev_info_get().
722 	 * The number of non-single-link ports, i.e. this value less
723 	 * *nb_single_link_event_port_queues* in this struct, cannot exceed
724 	 * @ref rte_event_dev_info.max_event_ports.
725 	 */
726 	uint32_t nb_event_queue_flows;
727 	/**< Max number of flows needed for a single event queue on this device.
728 	 * This value cannot exceed @ref rte_event_dev_info.max_event_queue_flows
729 	 * returned by rte_event_dev_info_get()
730 	 */
731 	uint32_t nb_event_port_dequeue_depth;
732 	/**< Max number of events that can be dequeued at a time from an event port on this device.
733 	 * This value cannot exceed @ref rte_event_dev_info.max_event_port_dequeue_depth
734 	 * returned by rte_event_dev_info_get().
735 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
736 	 * @see rte_event_port_setup() rte_event_dequeue_burst()
737 	 */
738 	uint32_t nb_event_port_enqueue_depth;
739 	/**< Maximum number of events that can be enqueued at a time to an event port on this device.
740 	 * This value cannot exceed @ref rte_event_dev_info.max_event_port_enqueue_depth
741 	 * returned by rte_event_dev_info_get().
742 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
743 	 * @see rte_event_port_setup() rte_event_enqueue_burst()
744 	 */
745 	uint32_t event_dev_cfg;
746 	/**< Event device config flags (RTE_EVENT_DEV_CFG_*). */
747 	uint8_t nb_single_link_event_port_queues;
748 	/**< Number of event ports and queues that will be singly-linked to
749 	 * each other. These are a subset of the overall event ports and
750 	 * queues; this value cannot exceed *nb_event_ports* or
751 	 * *nb_event_queues*. If the device has ports and queues that are
752 	 * optimized for single-link usage, this field is a hint for how many
753 	 * to allocate; otherwise, regular event ports and queues will be used.
754 	 */
755 };
756 
757 /**
758  * Configure an event device.
759  *
760  * This function must be invoked before any other configuration function in the
761  * API, when preparing an event device for application use.
762  * This function can also be re-invoked when a device is in the stopped state.
763  *
764  * The caller should use rte_event_dev_info_get() to get the capabilities and
765  * resource limits for this event device before calling this API.
766  * Many values in the dev_conf input parameter are subject to limits given
767  * in the device information returned from rte_event_dev_info_get().
768  *
769  * @param dev_id
770  *   The identifier of the device to configure.
771  * @param dev_conf
772  *   The event device configuration structure.
773  *
774  * @return
775  *   - 0: Success, device configured.
776  *   - <0: Error code returned by the driver configuration function.
777  *     - -ENOTSUP - device does not support configuration.
778  *     - -EINVAL  - invalid input parameter.
779  *     - -EBUSY   - device has already been started.
780  */
781 int
782 rte_event_dev_configure(uint8_t dev_id,
783 			const struct rte_event_dev_config *dev_conf);
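/*
 * Example usage (a sketch; the queue/port counts are illustrative and error
 * checks are omitted):
 *
 * \code{.c}
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config config = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	config.nb_event_queues = 2;     // must not exceed info.max_event_queues
 *	config.nb_event_ports = 4;      // must not exceed info.max_event_ports
 *	config.nb_events_limit = info.max_num_events; // -1 for open systems
 *	config.nb_event_queue_flows = info.max_event_queue_flows;
 *	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	config.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	rte_event_dev_configure(dev_id, &config);
 * \endcode
 */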
784 
785 /* Event queue specific APIs */
786 
787 /* Event queue configuration bitmap flags */
788 #define RTE_EVENT_QUEUE_CFG_ALL_TYPES          (1ULL << 0)
789 /**< Allow events with schedule types ATOMIC, ORDERED, and PARALLEL to be enqueued to this queue.
790  *
791  * The scheduling type to be used is that specified in each individual event.
792  * This flag can only be set when configuring queues on devices reporting the
793  * @ref RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES capability.
794  *
795  * Without this flag, only events with the specific scheduling type configured at queue setup
796  * can be sent to the queue.
797  *
798  * @see RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES
799  * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
800  * @see rte_event_enqueue_burst()
801  */
802 #define RTE_EVENT_QUEUE_CFG_SINGLE_LINK        (1ULL << 1)
803 /**< This event queue links only to a single event port.
804  *
805  * No load-balancing of events is performed, as all events
806  * sent to this queue end up at the same event port.
807  * The number of queues on which this flag is to be set must be
808  * configured at device configuration time, by setting
809  * @ref rte_event_dev_config.nb_single_link_event_port_queues
810  * parameter appropriately.
811  *
812  * This flag serves as a hint only, any devices without specific
813  * support for single-link queues can fall-back automatically to
814  * using regular queues with a single destination port.
815  *
816  *  @see rte_event_dev_info.max_single_link_event_port_queue_pairs
817  *  @see rte_event_dev_config.nb_single_link_event_port_queues
818  *  @see rte_event_port_setup(), rte_event_port_link()
819  */
820 
821 /** Event queue configuration structure */
822 struct rte_event_queue_conf {
823 	uint32_t nb_atomic_flows;
824 	/**< The maximum number of active flows this queue can track at any
825 	 * given time.
826 	 *
827 	 * If the queue is configured for atomic scheduling (by
828 	 * applying the @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to
829 	 * @ref rte_event_queue_conf.event_queue_cfg
830 	 * or @ref RTE_SCHED_TYPE_ATOMIC flag to @ref rte_event_queue_conf.schedule_type), then the
831 	 * value must be in the range of [1, @ref rte_event_dev_config.nb_event_queue_flows],
832 	 * which was previously provided in rte_event_dev_configure().
833 	 *
834 	 * If the queue is not configured for atomic scheduling this value is ignored.
835 	 */
836 	uint32_t nb_atomic_order_sequences;
837 	/**< The maximum number of outstanding events waiting to be
838 	 * reordered by this queue. In other words, the number of entries in
839 	 * this queue's reorder buffer. When the number of events in the
840 	 * reorder buffer reaches *nb_atomic_order_sequences*, the
841 	 * scheduler cannot schedule the events from this queue and no
842 	 * events will be returned from dequeue until one or more entries are
843 	 * freed up/released.
844 	 *
845 	 * If the queue is configured for ordered scheduling (by applying the
846 	 * @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to @ref rte_event_queue_conf.event_queue_cfg or
847 	 * @ref RTE_SCHED_TYPE_ORDERED flag to @ref rte_event_queue_conf.schedule_type),
848 	 * then the value must be in the range of
849 	 * [1, @ref rte_event_dev_config.nb_event_queue_flows], which was
850 	 * previously supplied to rte_event_dev_configure().
851 	 *
852 	 * If the queue is not configured for ordered scheduling, then this value is ignored.
853 	 */
854 	uint32_t event_queue_cfg;
855 	/**< Queue config flags (RTE_EVENT_QUEUE_CFG_*). */
856 	uint8_t schedule_type;
857 	/**< Queue schedule type (RTE_SCHED_TYPE_*).
858 	 *
859 	 * Valid when @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag is not set in
860 	 * @ref rte_event_queue_conf.event_queue_cfg.
861 	 *
862 	 * If the @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag is set, then this field is ignored.
863 	 *
864 	 * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
865 	 */
866 	uint8_t priority;
867 	/**< Priority for this event queue relative to other event queues.
868 	 *
869 	 * The requested priority should be in the range of
870 	 * [@ref RTE_EVENT_DEV_PRIORITY_HIGHEST, @ref RTE_EVENT_DEV_PRIORITY_LOWEST].
871 	 * The implementation shall normalize the requested priority to
872 	 * event device supported priority value.
873 	 *
874 	 * Valid when the device has @ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability,
875 	 * ignored otherwise.
876 	 */
877 	uint8_t weight;
878 	/**< Weight of the event queue relative to other event queues.
879 	 *
880 	 * The requested weight should be in the range of
881 	 * [@ref RTE_EVENT_QUEUE_WEIGHT_HIGHEST, @ref RTE_EVENT_QUEUE_WEIGHT_LOWEST].
882 	 * The implementation shall normalize the requested weight to event
883 	 * device supported weight value.
884 	 *
885 	 * Valid when the device has @ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability,
886 	 * ignored otherwise.
887 	 */
888 	uint8_t affinity;
889 	/**< Affinity of the event queue relative to other event queues.
890 	 *
891 	 * The requested affinity should be in the range of
892 	 * [@ref RTE_EVENT_QUEUE_AFFINITY_HIGHEST, @ref RTE_EVENT_QUEUE_AFFINITY_LOWEST].
893 	 * The implementation shall normalize the requested affinity to event
894 	 * device supported affinity value.
895 	 *
896 	 * Valid when the device has @ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability,
897 	 * ignored otherwise.
898 	 */
899 };
900 
901 /**
902  * Retrieve the default configuration information of an event queue designated
903  * by its *queue_id* from the event driver for an event device.
904  *
905  * This function is intended to be used in conjunction with rte_event_queue_setup(),
906  * where the caller needs to set up the queue by overriding a few default values.
907  *
908  * @param dev_id
909  *   The identifier of the device.
910  * @param queue_id
911  *   The index of the event queue to get the configuration information.
912  *   The value must be less than @ref rte_event_dev_config.nb_event_queues
913  *   previously supplied to rte_event_dev_configure().
914  * @param[out] queue_conf
915  *   The pointer to the default event queue configuration data.
916  * @return
917  *   - 0: Success, driver updates the default event queue configuration data.
918  *   - <0: Error code returned by the driver info get function.
919  *
920  * @see rte_event_queue_setup()
921  */
922 int
923 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
924 				 struct rte_event_queue_conf *queue_conf);
925 
926 /**
927  * Allocate and set up an event queue for an event device.
928  *
929  * @param dev_id
930  *   The identifier of the device.
931  * @param queue_id
932  *   The index of the event queue to set up. The value must be
933  *   less than @ref rte_event_dev_config.nb_event_queues previously supplied to
934  *   rte_event_dev_configure().
935  * @param queue_conf
936  *   The pointer to the configuration data to be used for the event queue.
937  *   NULL value is allowed, in which case the default configuration is used.
938  *
939  * @see rte_event_queue_default_conf_get()
940  *
941  * @return
942  *   - 0: Success, event queue correctly set up.
943  *   - <0: event queue configuration failed.
944  */
945 int
946 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
947 		      const struct rte_event_queue_conf *queue_conf);
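/*
 * Example: set up an atomic, high-priority queue starting from the driver
 * defaults (a sketch; error handling omitted):
 *
 * \code{.c}
 *	struct rte_event_queue_conf qconf;
 *
 *	rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
 *	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	qconf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
 *	rte_event_queue_setup(dev_id, queue_id, &qconf);
 * \endcode
 */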
948 
949 /**
950  * Queue attribute id for the priority of the queue.
951  */
952 #define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
953 /**
954  * Queue attribute id for the number of atomic flows configured for the queue.
955  */
956 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
957 /**
958  * Queue attribute id for the number of atomic order sequences configured for the queue.
959  */
960 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
961 /**
962  * Queue attribute id for the configuration flags for the queue.
963  */
964 #define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
965 /**
966  * Queue attribute id for the schedule type of the queue.
967  */
968 #define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
969 /**
970  * Queue attribute id for the weight of the queue.
971  */
972 #define RTE_EVENT_QUEUE_ATTR_WEIGHT 5
973 /**
974  * Queue attribute id for the affinity of the queue.
975  */
976 #define RTE_EVENT_QUEUE_ATTR_AFFINITY 6
977 
978 /**
979  * Get an attribute of an event queue.
980  *
981  * @param dev_id
982  *   The identifier of the device.
983  * @param queue_id
984  *   The index of the event queue to query. The value must be less than
985  *   @ref rte_event_dev_config.nb_event_queues previously supplied to rte_event_dev_configure().
986  * @param attr_id
987  *   The attribute ID to retrieve (RTE_EVENT_QUEUE_ATTR_*).
988  * @param[out] attr_value
989  *   A pointer that will be filled in with the attribute value if successful.
990  *
991  * @return
992  *   - 0: Successfully returned value
993  *   - -EINVAL: invalid device, queue or attr_id provided, or attr_value was NULL.
994  *   - -EOVERFLOW: returned when attr_id is set to
995  *   @ref RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES is
996  *   set in the queue configuration flags.
997  */
998 int
999 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
1000 			uint32_t *attr_value);
1001 
1002 /**
1003  * Set an event queue attribute.
1004  *
1005  * @param dev_id
1006  *   The identifier of the device.
1007  * @param queue_id
1008  *   The index of the event queue to configure. The value must be less than
1009  *   @ref rte_event_dev_config.nb_event_queues previously supplied to rte_event_dev_configure().
1010  * @param attr_id
1011  *   The attribute ID to set (RTE_EVENT_QUEUE_ATTR_*).
1012  * @param attr_value
1013  *   The attribute value to set.
1014  *
1015  * @return
1016  *   - 0: Successfully set attribute.
1017  *   - <0: failed to set event queue attribute.
1018  *   -   -EINVAL: invalid device, queue or attr_id.
1019  *   -   -ENOTSUP: device does not support setting the event attribute.
1020  */
1021 int
1022 rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
1023 			 uint64_t attr_value);
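/*
 * Example: raise a queue's scheduling weight at runtime, only where the device
 * advertises the capability (a sketch):
 *
 * \code{.c}
 *	struct rte_event_dev_info info;
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR)
 *		rte_event_queue_attr_set(dev_id, queue_id,
 *					 RTE_EVENT_QUEUE_ATTR_WEIGHT,
 *					 RTE_EVENT_QUEUE_WEIGHT_HIGHEST);
 * \endcode
 */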
1024 
1025 /* Event port specific APIs */
1026 
1027 /* Event port configuration bitmap flags */
1028 #define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)
1029 /**< Configure the port not to release outstanding events in
1030  * rte_event_dequeue_burst(). If set, all events received through
1031  * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
1032  * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
1033  * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
1034  */
1035 #define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)
1036 /**< This event port links only to a single event queue.
1037  * The queue it links with should be similarly configured with the
1038  * @ref RTE_EVENT_QUEUE_CFG_SINGLE_LINK flag.
1039  *
1040  *  @see RTE_EVENT_QUEUE_CFG_SINGLE_LINK
1041  *  @see rte_event_port_setup(), rte_event_port_link()
1042  */
1043 #define RTE_EVENT_PORT_CFG_HINT_PRODUCER       (1ULL << 2)
1044 /**< Hint that this event port will primarily enqueue events to the system.
1045  * A PMD can optimize its internal workings by assuming that this port is
1046  * primarily going to enqueue NEW events.
1047  *
1048  * Note that this flag is only a hint, so PMDs must operate under the
1049  * assumption that any port can enqueue an event with any type of op.
1050  *
1051  *  @see rte_event_port_setup()
1052  */
1053 #define RTE_EVENT_PORT_CFG_HINT_CONSUMER       (1ULL << 3)
1054 /**< Hint that this event port will primarily dequeue events from the system.
1055  * A PMD can optimize its internal workings by assuming that this port is
1056  * primarily going to consume events, and not enqueue NEW or FORWARD
1057  * events.
1058  *
1059  * Note that this flag is only a hint, so PMDs must operate under the
1060  * assumption that any port can enqueue an event with any type of op.
1061  *
1062  *  @see rte_event_port_setup()
1063  */
1064 #define RTE_EVENT_PORT_CFG_HINT_WORKER         (1ULL << 4)
1065 /**< Hint that this event port will primarily pass existing events through.
1066  * A PMD can optimize its internal workings by assuming that this port is
1067  * primarily going to FORWARD events, and not enqueue NEW or RELEASE events
1068  * often.
1069  *
1070  * Note that this flag is only a hint, so PMDs must operate under the
1071  * assumption that any port can enqueue an event with any type of op.
1072  *
1073  *  @see rte_event_port_setup()
1074  */
1075 
1076 /** Event port configuration structure */
1077 struct rte_event_port_conf {
1078 	int32_t new_event_threshold;
1079 	/**< A backpressure threshold for new event enqueues on this port.
1080 	 * Use for *closed system* event dev where event capacity is limited,
1081 	 * and cannot exceed the capacity of the event dev.
1082 	 *
1083 	 * Configuring ports with different thresholds can make higher priority
1084 	 * traffic less likely to be backpressured.
1085 	 * For example, a port used to inject NIC Rx packets into the event dev
1086 	 * can have a lower threshold so as not to overwhelm the device,
1087 	 * while ports used for worker pools can have a higher threshold.
1088 	 * This value cannot exceed the @ref rte_event_dev_config.nb_events_limit value
1089 	 * which was previously supplied to rte_event_dev_configure().
1090 	 *
1091 	 * This should be set to '-1' for *open systems*, i.e. when
1092 	 * @ref rte_event_dev_info.max_num_events == -1.
1093 	 */
1094 	uint16_t dequeue_depth;
1095 	/**< Configure the maximum size of burst dequeues for this event port.
1096 	 * This value cannot exceed the @ref rte_event_dev_config.nb_event_port_dequeue_depth value
1097 	 * which was previously supplied to rte_event_dev_configure().
1098 	 *
1099 	 * Ignored when device does not support the @ref RTE_EVENT_DEV_CAP_BURST_MODE capability.
1100 	 */
1101 	uint16_t enqueue_depth;
1102 	/**< Configure the maximum size of burst enqueues to this event port.
1103 	 * This value cannot exceed the @ref rte_event_dev_config.nb_event_port_enqueue_depth value
1104 	 * which was previously supplied to rte_event_dev_configure().
1105 	 *
1106 	 * Ignored when device does not support the @ref RTE_EVENT_DEV_CAP_BURST_MODE capability.
1107 	 */
1108 	uint32_t event_port_cfg; /**< Port configuration flags(EVENT_PORT_CFG_) */
1109 };
1110 
1111 /**
1112  * Retrieve the default configuration information of an event port designated
1113  * by its *port_id* from the event driver for an event device.
1114  *
1115  * This function is intended to be used in conjunction with rte_event_port_setup()
1116  * where the caller can set up the port by overriding just a few default values.
1117  *
1118  * @param dev_id
1119  *   The identifier of the device.
1120  * @param port_id
1121  *   The index of the event port to get the configuration information.
1122  *   The value must be less than @ref rte_event_dev_config.nb_event_ports
1123  *   previously supplied to rte_event_dev_configure().
1124  * @param[out] port_conf
1125  *   The pointer to a structure to store the default event port configuration data.
1126  * @return
1127  *   - 0: Success, driver updates the default event port configuration data.
1128  *   - <0: Error code returned by the driver info get function.
1129  *      - -EINVAL - invalid input parameter.
1130  *      - -ENOTSUP - function is not supported for this device.
1131  *
1132  * @see rte_event_port_setup()
1133  */
1134 int
1135 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
1136 				struct rte_event_port_conf *port_conf);
1137 
1138 /**
1139  * Allocate and set up an event port for an event device.
1140  *
1141  * @param dev_id
1142  *   The identifier of the device.
1143  * @param port_id
1144  *   The index of the event port to set up. The value must be less than
1145  *   @ref rte_event_dev_config.nb_event_ports previously supplied to
1146  *   rte_event_dev_configure().
1147  * @param port_conf
1148  *   The pointer to the configuration data to be used for the port.
1149  *   NULL value is allowed, in which case the default configuration is used.
1150  *
1151  * @see rte_event_port_default_conf_get()
1152  *
1153  * @return
1154  *   - 0: Success, event port correctly set up.
1155  *   - <0: Port configuration failed.
1156  *     - -EINVAL - Invalid input parameter.
1157  *     - -EBUSY - Port already started.
1158  *     - -ENOTSUP - Function not supported on this device, or a NULL pointer passed
1159  *        as the port_conf parameter, and no default configuration function available
1160  *        for this device.
1161  *     - -EDQUOT - Application tried to link a queue configured
1162  *      with @ref RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port.
1163  */
1164 int
1165 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
1166 		     const struct rte_event_port_conf *port_conf);
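/*
 * Example: set up a worker port with implicit release disabled, where the
 * device supports it (a sketch; error handling omitted):
 *
 * \code{.c}
 *	struct rte_event_dev_info info;
 *	struct rte_event_port_conf pconf;
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	rte_event_port_default_conf_get(dev_id, port_id, &pconf);
 *	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
 *		pconf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
 *	rte_event_port_setup(dev_id, port_id, &pconf);
 * \endcode
 */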
1167 
1168 typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
1169 					  struct rte_event event, void *arg);
1170 /**< Callback function prototype that can be passed during
1171  * rte_event_port_quiesce(), invoked once per released event.
1172  */
1173 
1174 /**
1175  * Quiesce any core specific resources consumed by the event port.
1176  *
1177  * Event ports are generally coupled with lcores, and a given hardware
1178  * implementation might require the PMD to store port-specific data in the
1179  * lcore.
1180  * When the application decides to migrate the event port to another lcore
1181  * or tear down the current lcore, it may call `rte_event_port_quiesce`
1182  * to make sure that all the data associated with the event port are released
1183  * from the lcore; this might also include any prefetched events.
1184  * While releasing the event port from the lcore, this function calls the
1185  * user-provided flush callback once per event.
1186  *
1187  * @note Invocation of this API does not affect the existing port configuration.
1188  *
1189  * @param dev_id
1190  *   The identifier of the device.
1191  * @param port_id
1192  *   The index of the event port to quiesce. The value must be less than
1193  *   @ref rte_event_dev_config.nb_event_ports previously supplied to rte_event_dev_configure().
1194  * @param release_cb
1195  *   Callback function invoked once per flushed event.
1196  * @param args
1197  *   Argument supplied to callback.
1198  */
1199 void
1200 rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
1201 		       rte_eventdev_port_flush_t release_cb, void *args);
1202 
1203 /**
1204  * Port attribute id for the maximum size of a burst enqueue operation supported on a port.
1205  */
1206 #define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
1207 /**
1208  * Port attribute id for the maximum size of a dequeue burst which can be returned from a port.
1209  */
1210 #define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
1211 /**
1212  * Port attribute id for the new event threshold of the port.
1213  * Once the number of events in the system exceeds this threshold, the enqueue of NEW-type
1214  * events will fail.
1215  */
1216 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
1217 /**
1218  * Port attribute id for the implicit release disable attribute of the port.
1219  */
1220 #define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
1221 
1222 /**
1223  * Get an attribute from a port.
1224  *
1225  * @param dev_id
1226  *   The identifier of the device.
1227  * @param port_id
1228  *   The index of the event port to query. The value must be less than
1229  *   @ref rte_event_dev_config.nb_event_ports previously supplied to rte_event_dev_configure().
1230  * @param attr_id
1231  *   The attribute ID to retrieve (RTE_EVENT_PORT_ATTR_*)
1232  * @param[out] attr_value
1233  *   A pointer that will be filled in with the attribute value if successful
1234  *
1235  * @return
1236  *   - 0: Successfully returned value.
1237  *   - (-EINVAL) Invalid device, port or attr_id, or attr_value was NULL.
1238  */
1239 int
1240 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
1241 			uint32_t *attr_value);
1242 
1243 /**
1244  * Start an event device.
1245  *
1246  * The device start step is the last one in device setup, and enables the event
1247  * ports and queues to start accepting events and scheduling them to event ports.
1248  *
1249  * On success, all basic functions exported by the API (event enqueue,
1250  * event dequeue and so on) can be invoked.
1251  *
1252  * @param dev_id
1253  *   Event device identifier.
1254  * @return
1255  *   - 0: Success, device started.
1256  *   - -EINVAL:  Invalid device id provided.
1257  *   - -ENOTSUP: Device does not support this operation.
1258  *   - -ESTALE : Not all ports of the device are configured.
1259  *   - -ENOLINK: Not all queues are linked, which could lead to deadlock.
1260  */
1261 int
1262 rte_event_dev_start(uint8_t dev_id);
1263 
1264 /**
1265  * Stop an event device.
1266  *
1267  * This function causes all queued events to be drained, including those
1268  * residing in event ports. While draining events out of the device, this
1269  * function calls the user-provided flush callback (if one was registered) once
1270  * per event.
1271  *
 * The device can be restarted with a call to rte_event_dev_start(). Threads
 * that continue to enqueue/dequeue while the device is stopped, or is being
 * stopped, cause undefined behavior. This includes event adapters, which
 * must be stopped prior to stopping the eventdev.
1276  *
1277  * @param dev_id
1278  *   Event device identifier.
1279  *
1280  * @see rte_event_dev_stop_flush_callback_register()
1281  */
1282 void
1283 rte_event_dev_stop(uint8_t dev_id);
1284 
1285 typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
1286 					  struct rte_event event, void *arg);
1287 /**< Callback function called during rte_event_dev_stop(), invoked once per
1288  * flushed event.
1289  */
1290 
1291 /**
1292  * Registers a callback function to be invoked during rte_event_dev_stop() for
1293  * each flushed event. This function can be used to properly dispose of queued
1294  * events, for example events containing memory pointers.
1295  *
1296  * The callback function is only registered for the calling process. The
1297  * callback function must be registered in every process that can call
1298  * rte_event_dev_stop().
1299  *
1300  * Only one callback function may be registered. Each new call replaces
1301  * the existing registered callback function with the new function passed in.
1302  *
1303  * To unregister a callback, call this function with a NULL callback pointer.
1304  *
1305  * @param dev_id
1306  *   The identifier of the device.
1307  * @param callback
1308  *   Callback function to be invoked once per flushed event.
1309  *   Pass NULL to unset any previously-registered callback function.
1310  * @param userdata
1311  *   Argument supplied to callback.
1312  *
1313  * @return
1314  *  - 0 on success.
1315  *  - -EINVAL if *dev_id* is invalid.
1316  *
1317  * @see rte_event_dev_stop()
1318  */
1319 int rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1320 					       rte_eventdev_stop_flush_t callback, void *userdata);
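
/*
 * Usage sketch (illustrative only): register a flush callback that frees the
 * mbuf carried by each drained event, then stop the device. Assumes all
 * in-flight events carry mbuf payloads.
 *
 *	static void
 *	stop_flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
 *	{
 *		rte_pktmbuf_free(ev.mbuf);
 *	}
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, stop_flush_cb, NULL);
 *	rte_event_dev_stop(dev_id);
 */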
1321 
1322 /**
1323  * Close an event device. The device cannot be restarted!
1324  *
1325  * @param dev_id
1326  *   Event device identifier.
1327  *
1328  * @return
1329  *  - 0 on successfully closing device
1330  *  - <0 on failure to close device.
1331  *    - -EINVAL - invalid device id.
1332  *    - -ENOTSUP - operation not supported for this device.
1333  *    - -EAGAIN - device is busy.
1334  */
1335 int
1336 rte_event_dev_close(uint8_t dev_id);
1337 
1338 /**
1339  * Event vector structure.
1340  */
1341 struct __rte_aligned(16) rte_event_vector {
1342 	uint16_t nb_elem;
1343 	/**< Number of elements valid in this event vector. */
1344 	uint16_t elem_offset : 12;
1345 	/**< Offset into the vector array where valid elements start from. */
1346 	uint16_t rsvd : 3;
1347 	/**< Reserved for future use */
1348 	uint16_t attr_valid : 1;
1349 	/**< Indicates that the below union attributes have valid information.
1350 	 */
1351 	union {
1352 		/* Used by Rx/Tx adapter.
1353 		 * Indicates that all the elements in this vector belong to the
1354 		 * same port and queue pair when originating from Rx adapter,
1355 		 * valid only when event type is ETHDEV_VECTOR or
1356 		 * ETH_RX_ADAPTER_VECTOR.
		 * Can also be used to indicate to the Tx adapter the
		 * destination port and queue of the mbufs in the vector.
1359 		 */
1360 		struct {
1361 			uint16_t port;   /**< Ethernet device port id. */
1362 			uint16_t queue;  /**< Ethernet device queue id. */
1363 		};
1364 	};
1365 	/**< Union to hold common attributes of the vector array. */
	uint64_t impl_opaque;
	/**< Implementation specific opaque value.
	 * An implementation may use this field to hold an implementation
	 * specific value to share between dequeue and enqueue operations.
	 * The application should not modify this field.
	 */

/* Empty structures do not have zero size in C++, leading to compilation errors
 * with clang about the structure having different sizes in C and C++.
 * Since these are all zero-sized arrays, we can omit the "union" wrapper for
 * C++ builds, removing the warning.
 */
#ifndef __cplusplus
	union __rte_aligned(16) {
#endif
1381 		struct rte_mbuf *mbufs[0];
1382 		void *ptrs[0];
1383 		uint64_t u64s[0];
1384 #ifndef __cplusplus
1385 	};
1386 #endif
1387 	/**< Start of the vector array union. Depending upon the event type the
1388 	 * vector array can be an array of mbufs or pointers or opaque u64
1389 	 * values.
1390 	 */
1391 };
1392 
1393 /* Scheduler type definitions */
1394 #define RTE_SCHED_TYPE_ORDERED          0
1395 /**< Ordered scheduling
1396  *
1397  * Events from an ordered flow of an event queue can be scheduled to multiple
1398  * ports for concurrent processing while maintaining the original event order,
1399  * i.e. the order in which they were first enqueued to that queue.
1400  * This scheme allows events pertaining to the same, potentially large, flow to
1401  * be processed in parallel on multiple cores without incurring any
1402  * application-level order restoration logic overhead.
1403  *
1404  * After events are dequeued from a set of ports, as those events are re-enqueued
1405  * to another queue (with the op field set to @ref RTE_EVENT_OP_FORWARD), the event
1406  * device restores the original event order - including events returned from all
1407  * ports in the set - before the events are placed on the destination queue,
1408  * for subsequent scheduling to ports.
1409  *
 * Any events not forwarded, i.e. dropped explicitly via RELEASE or implicitly
 * released by the next dequeue operation on a port, are skipped by the reordering
 * stage and do not affect the reordering of other returned events.
1413  *
1414  * Any NEW events sent on a port are not ordered with respect to FORWARD events sent
 * on the same port, since they have no original event order. They are also not
1416  * ordered with respect to NEW events enqueued on other ports.
1417  * However, NEW events to the same destination queue from the same port are guaranteed
1418  * to be enqueued in the order they were submitted via rte_event_enqueue_burst().
1419  *
1420  * NOTE:
1421  *   In restoring event order of forwarded events, the eventdev API guarantees that
1422  *   all events from the same flow (i.e. same @ref rte_event.flow_id,
1423  *   @ref rte_event.priority and @ref rte_event.queue_id) will be put in the original
1424  *   order before being forwarded to the destination queue.
1425  *   Some eventdevs may implement stricter ordering to achieve this aim,
1426  *   for example, restoring the order across *all* flows dequeued from the same ORDERED
1427  *   queue.
1428  *
1429  * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
1430  */
1431 
1432 #define RTE_SCHED_TYPE_ATOMIC           1
1433 /**< Atomic scheduling
1434  *
1435  * Events from an atomic flow, identified by a combination of @ref rte_event.flow_id,
1436  * @ref rte_event.queue_id and @ref rte_event.priority, can be scheduled only to a
1437  * single port at a time. The port is guaranteed to have exclusive (atomic)
1438  * access to the associated flow context, which enables the user to avoid SW
1439  * synchronization. Atomic flows also maintain event ordering
1440  * since only one port at a time can process events from each flow of an
1441  * event queue, and events within a flow are not reordered within the scheduler.
1442  *
1443  * An atomic flow is locked to a port when events from that flow are first
1444  * scheduled to that port. That lock remains in place until the
1445  * application calls rte_event_dequeue_burst() from the same port,
1446  * which implicitly releases the lock (if @ref RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL flag is not set).
 * The application may allow the scheduler to release the lock earlier than that by
 * invoking rte_event_enqueue_burst() with the RTE_EVENT_OP_RELEASE operation for
 * each event from that flow.
1449  *
1450  * NOTE: Where multiple events from the same queue and atomic flow are scheduled to a port,
1451  * the lock for that flow is only released once the last event from the flow is released,
1452  * or forwarded to another queue. So long as there is at least one event from an atomic
1453  * flow scheduled to a port/core (including any events in the port's dequeue queue, not yet read
1454  * by the application), that port will hold the synchronization lock for that flow.
1455  *
1456  * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
1457  */
1458 
1459 #define RTE_SCHED_TYPE_PARALLEL         2
1460 /**< Parallel scheduling
1461  *
1462  * The scheduler performs priority scheduling, load balancing, etc. functions
1463  * but does not provide additional event synchronization or ordering.
1464  * It is free to schedule events from a single parallel flow of an event queue
 * to multiple event ports for concurrent processing.
1466  * The application is responsible for flow context synchronization and
1467  * event ordering (SW synchronization).
1468  *
1469  * @see rte_event_queue_setup(), rte_event_dequeue_burst()
1470  */
1471 
1472 /* Event types to classify the event source */
1473 #define RTE_EVENT_TYPE_ETHDEV           0x0
1474 /**< The event generated from ethdev subsystem */
1475 #define RTE_EVENT_TYPE_CRYPTODEV        0x1
/**< The event generated from cryptodev subsystem */
1477 #define RTE_EVENT_TYPE_TIMER		0x2
1478 /**< The event generated from event timer adapter */
1479 #define RTE_EVENT_TYPE_CPU              0x3
1480 /**< The event generated from cpu for pipelining.
1481  * Application may use *sub_event_type* to further classify the event
1482  */
1483 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER   0x4
1484 /**< The event generated from event eth Rx adapter */
1485 #define RTE_EVENT_TYPE_DMADEV           0x5
1486 /**< The event generated from dma subsystem */
1487 #define RTE_EVENT_TYPE_VECTOR           0x8
1488 /**< Indicates that event is a vector.
 * All vector event types should be a logical OR of RTE_EVENT_TYPE_VECTOR
 * and one of the event types above.
 * This simplifies the pipeline design as one can split processing the events
 * between vector events and normal events across event types.
1492  * Example:
1493  *	if (ev.event_type & RTE_EVENT_TYPE_VECTOR) {
1494  *		// Classify and handle vector event.
1495  *	} else {
1496  *		// Classify and handle event.
1497  *	}
1498  */
1499 #define RTE_EVENT_TYPE_ETHDEV_VECTOR                                           \
1500 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
1501 /**< The event vector generated from ethdev subsystem */
1502 #define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
1503 /**< The event vector generated from cpu for pipelining. */
1504 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR                                   \
1505 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
1506 /**< The event vector generated from eth Rx adapter. */
1507 #define RTE_EVENT_TYPE_CRYPTODEV_VECTOR                                        \
1508 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
1509 /**< The event vector generated from cryptodev adapter. */
1510 
1511 #define RTE_EVENT_TYPE_MAX              0x10
1512 /**< Maximum number of event types */
1513 
1514 /* Event enqueue operations */
1515 #define RTE_EVENT_OP_NEW                0
1516 /**< The @ref rte_event.op field must be set to this operation type to inject a new event,
1517  * i.e. one not previously dequeued, into the event device, to be scheduled
1518  * for processing.
1519  */
1520 #define RTE_EVENT_OP_FORWARD            1
1521 /**< The application must set the @ref rte_event.op field to this operation type to return a
1522  * previously dequeued event to the event device to be scheduled for further processing.
1523  *
1524  * This event *must* be enqueued to the same port that the
1525  * event to be forwarded was dequeued from.
1526  *
1527  * The event's fields, including (but not limited to) flow_id, scheduling type,
1528  * destination queue, and event payload e.g. mbuf pointer, may all be updated as
1529  * desired by the application, but the @ref rte_event.impl_opaque field must
1530  * be kept to the same value as was present when the event was dequeued.
1531  */
1532 #define RTE_EVENT_OP_RELEASE            2
1533 /**< Release the flow context associated with the schedule type.
1534  *
 * If the current flow's scheduling type is @ref RTE_SCHED_TYPE_ATOMIC
 * then this operation type hints the scheduler that the user has completed critical
 * section processing for this event in the current atomic context, and that the
 * scheduler may unlock any atomic locks held for this event.
 * If this is the last event from an atomic flow, i.e. all flow locks are released
 * (see @ref RTE_SCHED_TYPE_ATOMIC for details), the scheduler is now allowed to
 * schedule events from that flow to another port.
 * However, the atomic locks may still be held until the next rte_event_dequeue_burst()
 * call; enqueuing an event with op type @ref RTE_EVENT_OP_RELEASE is a hint only,
 * allowing the scheduler to release the atomic locks early, but not requiring it to do so.
1545  *
1546  * Early atomic lock release may increase parallelism and thus system
1547  * performance, but the user needs to design carefully the split into critical
1548  * vs non-critical sections.
1549  *
 * If the current flow's scheduling type is @ref RTE_SCHED_TYPE_ORDERED
1551  * then this operation type informs the scheduler that the current event has
1552  * completed processing and will not be returned to the scheduler, i.e.
1553  * it has been dropped, and so the reordering context for that event
1554  * should be considered filled.
1555  *
1556  * Events with this operation type must only be enqueued to the same port that the
1557  * event to be released was dequeued from. The @ref rte_event.impl_opaque
1558  * field in the release event must have the same value as that in the original dequeued event.
1559  *
1560  * If a dequeued event is re-enqueued with operation type of @ref RTE_EVENT_OP_RELEASE,
1561  * then any subsequent enqueue of that event - or a copy of it - must be done as event of type
1562  * @ref RTE_EVENT_OP_NEW, not @ref RTE_EVENT_OP_FORWARD. This is because any context for
1563  * the originally dequeued event, i.e. atomic locks, or reorder buffer entries, will have
1564  * been removed or invalidated by the release operation.
1565  */
1566 
1567 /**
1568  * The generic *rte_event* structure to hold the event attributes
1569  * for dequeue and enqueue operation
1570  */
1571 struct rte_event {
1572 	/* WORD0 */
1573 	union {
1574 		uint64_t event;
1575 		/** Event attributes for dequeue or enqueue operation */
1576 		struct {
1577 			uint32_t flow_id:20;
1578 			/**< Target flow identifier for the enqueue and dequeue operation.
1579 			 *
1580 			 * For @ref RTE_SCHED_TYPE_ATOMIC, this field is used to identify a
1581 			 * flow for atomicity within a queue & priority level, such that events
1582 			 * from each individual flow will only be scheduled to one port at a time.
1583 			 *
1584 			 * This field is preserved between enqueue and dequeue when
1585 			 * a device reports the @ref RTE_EVENT_DEV_CAP_CARRY_FLOW_ID
1586 			 * capability. Otherwise the value is implementation dependent
1587 			 * on dequeue.
1588 			 */
1589 			uint32_t sub_event_type:8;
1590 			/**< Sub-event types based on the event source.
1591 			 *
1592 			 * This field is preserved between enqueue and dequeue.
1593 			 *
1594 			 * @see RTE_EVENT_TYPE_CPU
1595 			 */
1596 			uint32_t event_type:4;
1597 			/**< Event type to classify the event source. (RTE_EVENT_TYPE_*)
1598 			 *
1599 			 * This field is preserved between enqueue and dequeue
1600 			 */
1601 			uint8_t op:2;
1602 			/**< The type of event enqueue operation - new/forward/ etc.
1603 			 *
1604 			 * This field is *not* preserved across an instance
1605 			 * and is implementation dependent on dequeue.
1606 			 *
1607 			 * @see RTE_EVENT_OP_NEW
1608 			 * @see RTE_EVENT_OP_FORWARD
1609 			 * @see RTE_EVENT_OP_RELEASE
1610 			 */
1611 			uint8_t rsvd:4;
1612 			/**< Reserved for future use.
1613 			 *
1614 			 * Should be set to zero when initializing event structures.
1615 			 *
1616 			 * When forwarding or releasing existing events dequeued from the scheduler,
1617 			 * this field can be ignored.
1618 			 */
1619 			uint8_t sched_type:2;
1620 			/**< Scheduler synchronization type (RTE_SCHED_TYPE_*)
1621 			 * associated with flow id on a given event queue
1622 			 * for the enqueue and dequeue operation.
1623 			 *
1624 			 * This field is used to determine the scheduling type
1625 			 * for events sent to queues where @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES
1626 			 * is configured.
1627 			 * For queues where only a single scheduling type is available,
1628 			 * this field must be set to match the configured scheduling type.
1629 			 *
1630 			 * This field is preserved between enqueue and dequeue.
1631 			 *
1632 			 * @see RTE_SCHED_TYPE_ORDERED
1633 			 * @see RTE_SCHED_TYPE_ATOMIC
1634 			 * @see RTE_SCHED_TYPE_PARALLEL
1635 			 */
1636 			uint8_t queue_id;
1637 			/**< Targeted event queue identifier for the enqueue or
1638 			 * dequeue operation.
1639 			 * The value must be less than @ref rte_event_dev_config.nb_event_queues
1640 			 * which was previously supplied to rte_event_dev_configure().
1641 			 *
			 * This field is preserved between enqueue and dequeue.
1643 			 */
1644 			uint8_t priority;
1645 			/**< Event priority relative to other events in the
			 * event queue. The requested priority should be in the
			 * range of [@ref RTE_EVENT_DEV_PRIORITY_HIGHEST,
1648 			 * @ref RTE_EVENT_DEV_PRIORITY_LOWEST].
1649 			 *
1650 			 * The implementation shall normalize the requested
1651 			 * priority to supported priority value.
			 * (For devices where the supported priority range is a power-of-2, the
			 * normalization will be done via bit-shifting, so only the highest
			 * log2(num_priorities) bits will be used by the event device.)
1655 			 *
1656 			 * Valid when the device has @ref RTE_EVENT_DEV_CAP_EVENT_QOS capability
1657 			 * and this field is preserved between enqueue and dequeue,
1658 			 * though with possible loss of precision due to normalization and
1659 			 * subsequent de-normalization. (For example, if a device only supports 8
1660 			 * priority levels, only the high 3 bits of this field will be
1661 			 * used by that device, and hence only the value of those 3 bits are
1662 			 * guaranteed to be preserved between enqueue and dequeue.)
1663 			 *
1664 			 * Ignored when device does not support @ref RTE_EVENT_DEV_CAP_EVENT_QOS
1665 			 * capability, and it is implementation dependent if this field is preserved
1666 			 * between enqueue and dequeue.
1667 			 */
1668 			uint8_t impl_opaque;
1669 			/**< Opaque field for event device use.
1670 			 *
1671 			 * An event driver implementation may use this field to hold an
1672 			 * implementation specific value to share between
1673 			 * dequeue and enqueue operation.
1674 			 *
1675 			 * The application must not modify this field.
1676 			 * Its value is implementation dependent on dequeue,
1677 			 * and must be returned unmodified on enqueue when
1678 			 * op type is @ref RTE_EVENT_OP_FORWARD or @ref RTE_EVENT_OP_RELEASE.
1679 			 * This field is ignored on events with op type
1680 			 * @ref RTE_EVENT_OP_NEW.
1681 			 */
1682 		};
1683 	};
1684 	/* WORD1 */
1685 	union {
1686 		uint64_t u64;
1687 		/**< Opaque 64-bit value */
1688 		void *event_ptr;
1689 		/**< Opaque event pointer */
1690 		struct rte_mbuf *mbuf;
1691 		/**< mbuf pointer if dequeued event is associated with mbuf */
1692 		struct rte_event_vector *vec;
1693 		/**< Event vector pointer. */
1694 	};
1695 };
1696 
1697 /* Ethdev Rx adapter capability bitmap flags */
1698 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT	0x1
/**< This flag is set when the packet transfer mechanism is in HW.
1700  * Ethdev can send packets to the event device using internal event port.
1701  */
1702 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ	0x2
1703 /**< Adapter supports multiple event queues per ethdev. Every ethdev
1704  * Rx queue can be connected to a unique event queue.
1705  */
1706 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
1707 /**< The application can override the adapter generated flow ID in the
1708  * event. This flow ID can be specified when adding an ethdev Rx queue
1709  * to the adapter using the ev.flow_id member.
1710  * @see struct rte_event_eth_rx_adapter_queue_conf::ev
1711  * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
1712  */
1713 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR	0x8
1714 /**< Adapter supports event vectorization per ethdev. */
1715 
1716 /**
1717  * Retrieve the event device's ethdev Rx adapter capabilities for the
1718  * specified ethernet port
1719  *
1720  * @param dev_id
1721  *   The identifier of the device.
1722  *
1723  * @param eth_port_id
1724  *   The identifier of the ethernet device.
1725  *
1726  * @param[out] caps
1727  *   A pointer to memory filled with Rx event adapter capabilities.
1728  *
1729  * @return
1730  *   - 0: Success, driver provides Rx event adapter capabilities for the
1731  *	ethernet device.
1732  *   - <0: Error code returned by the driver function.
1733  */
1734 int
1735 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1736 				uint32_t *caps);
1737 
1738 #define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
1739 /**< This flag is set when the timer mechanism is in HW. */
1740 
1741 #define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC      (1ULL << 1)
1742 /**< This flag is set if periodic mode is supported. */
1743 
1744 /**
1745  * Retrieve the event device's timer adapter capabilities.
1746  *
1747  * @param dev_id
1748  *   The identifier of the device.
1749  *
1750  * @param[out] caps
1751  *   A pointer to memory to be filled with event timer adapter capabilities.
1752  *
1753  * @return
1754  *   - 0: Success, driver provided event timer adapter capabilities.
1755  *   - <0: Error code returned by the driver function.
1756  */
1757 int
1758 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
1759 
1760 /* Crypto adapter capability bitmap flag */
1761 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW   0x1
1762 /**< Flag indicates HW is capable of generating events in
1763  * RTE_EVENT_OP_NEW enqueue operation. Cryptodev will send
1764  * packets to the event device as new events using an internal
1765  * event port.
1766  */
1767 
1768 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD   0x2
1769 /**< Flag indicates HW is capable of generating events in
1770  * RTE_EVENT_OP_FORWARD enqueue operation. Cryptodev will send
1771  * packets to the event device as forwarded event using an
1772  * internal event port.
1773  */
1774 
1775 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND  0x4
1776 /**< Flag indicates HW is capable of mapping crypto queue pair to
1777  * event queue.
1778  */
1779 
1780 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA   0x8
1781 /**< Flag indicates HW/SW supports a mechanism to store and retrieve
1782  * the private data information along with the crypto session.
1783  */
1784 
1785 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR   0x10
1786 /**< Flag indicates HW is capable of aggregating processed
1787  * crypto operations into rte_event_vector.
1788  */
1789 
1790 /**
1791  * Retrieve the event device's crypto adapter capabilities for the
1792  * specified cryptodev device
1793  *
1794  * @param dev_id
1795  *   The identifier of the device.
1796  *
1797  * @param cdev_id
1798  *   The identifier of the cryptodev device.
1799  *
1800  * @param[out] caps
1801  *   A pointer to memory filled with event adapter capabilities.
1802  *   It is expected to be pre-allocated & initialized by caller.
1803  *
1804  * @return
1805  *   - 0: Success, driver provides event adapter capabilities for the
1806  *     cryptodev device.
1807  *   - <0: Error code returned by the driver function.
1808  */
1809 int
1810 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
1811 				  uint32_t *caps);
1812 
1813 /* DMA adapter capability bitmap flag */
1814 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
1815 /**< Flag indicates HW is capable of generating events in
1816  * RTE_EVENT_OP_NEW enqueue operation. DMADEV will send
1817  * packets to the event device as new events using an
1818  * internal event port.
1819  */
1820 
1821 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
1822 /**< Flag indicates HW is capable of generating events in
1823  * RTE_EVENT_OP_FORWARD enqueue operation. DMADEV will send
1824  * packets to the event device as forwarded event using an
1825  * internal event port.
1826  */
1827 
1828 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4
1829 /**< Flag indicates HW is capable of mapping DMA vchan to event queue. */
1830 
1831 /**
1832  * Retrieve the event device's DMA adapter capabilities for the
1833  * specified dmadev device
1834  *
1835  * @param dev_id
1836  *   The identifier of the device.
1837  *
1838  * @param dmadev_id
1839  *   The identifier of the dmadev device.
1840  *
1841  * @param[out] caps
1842  *   A pointer to memory filled with event adapter capabilities.
1843  *   It is expected to be pre-allocated & initialized by caller.
1844  *
1845  * @return
1846  *   - 0: Success, driver provides event adapter capabilities for the
1847  *     dmadev device.
1848  *   - <0: Error code returned by the driver function.
1849  *
1850  */
1851 __rte_experimental
1852 int
1853 rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);
1854 
1855 /* Ethdev Tx adapter capability bitmap flags */
1856 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT	0x1
/**< This flag is set when the PMD supports a packet transmit callback
1858  */
1859 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR	0x2
1860 /**< Indicates that the Tx adapter is capable of handling event vector of
1861  * mbufs.
1862  */
1863 
1864 /**
1865  * Retrieve the event device's eth Tx adapter capabilities
1866  *
1867  * @param dev_id
1868  *   The identifier of the device.
1869  *
1870  * @param eth_port_id
1871  *   The identifier of the ethernet device.
1872  *
1873  * @param[out] caps
1874  *   A pointer to memory filled with eth Tx adapter capabilities.
1875  *
1876  * @return
1877  *   - 0: Success, driver provides eth Tx adapter capabilities.
1878  *   - <0: Error code returned by the driver function.
1879  */
1880 int
1881 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1882 				uint32_t *caps);
1883 
1884 /**
1885  * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
1886  *
 * If the device is configured with the RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT flag
 * then the application can use this function to convert a timeout value in
 * nanoseconds to the implementation-specific timeout value supplied to
 * rte_event_dequeue_burst()
1891  *
1892  * @param dev_id
1893  *   The identifier of the device.
1894  * @param ns
1895  *   Wait time in nanosecond
1896  * @param[out] timeout_ticks
1897  *   Value for the *timeout_ticks* parameter in rte_event_dequeue_burst()
1898  *
1899  * @return
1900  *  - 0 on success.
1901  *  - -ENOTSUP if the device doesn't support timeouts
1902  *  - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
1903  *  - other values < 0 on failure.
1904  *
1905  * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
1906  * @see rte_event_dev_configure()
1907  */
1908 int
1909 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1910 					uint64_t *timeout_ticks);
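
/*
 * Usage sketch (illustrative only): convert a 100 microsecond wait into the
 * device-specific tick value passed to rte_event_dequeue_burst(); falling
 * back to a non-blocking dequeue is an application choice.
 *
 *	uint64_t timeout_ticks;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000,
 *			&timeout_ticks) < 0)
 *		timeout_ticks = 0;
 */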
1911 
1912 /**
1913  * Link multiple source event queues supplied in *queues* to the destination
1914  * event port designated by its *port_id* with associated service priority
1915  * supplied in *priorities* on the event device designated by its *dev_id*.
1916  *
 * The link establishment shall enable the event port *port_id* to receive
 * events from the specified event queue(s) supplied in *queues*
 *
 * An event queue may link to one or more event ports.
 * The number of links that can be established from an event queue to an event
 * port is implementation defined.
1923  *
1924  * Event queue(s) to event port link establishment can be changed at runtime
1925  * without re-configuring the device to support scaling and to reduce the
1926  * latency of critical work by establishing the link with more event ports
1927  * at runtime.
1928  *
1929  * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
1930  * than or equal to one, this function links the event queues to the default
1931  * profile_id i.e. profile_id 0 of the event port.
1932  *
1933  * @param dev_id
1934  *   The identifier of the device.
1935  *
1936  * @param port_id
1937  *   Event port identifier to select the destination port to link.
1938  *
1939  * @param queues
1940  *   Points to an array of *nb_links* event queues to be linked
1941  *   to the event port.
 *   NULL value is allowed, in which case this function links all the configured
 *   event queues *nb_event_queues*, which were previously supplied to
 *   rte_event_dev_configure(), to the event port *port_id*
1945  *
1946  * @param priorities
1947  *   Points to an array of *nb_links* service priorities associated with each
1948  *   event queue link to event port.
1949  *   The priority defines the event port's servicing priority for
1950  *   event queue, which may be ignored by an implementation.
 *   The requested priority should be in the range of
1952  *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
1953  *   The implementation shall normalize the requested priority to
1954  *   implementation supported priority value.
1955  *   NULL value is allowed, in which case this function links the event queues
1956  *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
1957  *
1958  * @param nb_links
1959  *   The number of links to establish. This parameter is ignored if queues is
1960  *   NULL.
1961  *
1962  * @return
1963  * The number of links actually established. The return value can be less than
1964  * the value of the *nb_links* parameter when the implementation has the
1965  * limitation on specific queue to port link establishment or if invalid
1966  * parameters are specified in *queues*
1967  * If the return value is less than *nb_links*, the remaining links at the end
1968  * of link[] are not established, and the caller has to take care of them.
 * If the return value is less than *nb_links*, the implementation shall update
 * rte_errno accordingly. Possible rte_errno values are:
 * (EDQUOT) Quota exceeded (the application tried to link a queue configured with
 *  RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
 * (EINVAL) Invalid parameter
1974  */
1975 int
1976 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
1977 		    const uint8_t queues[], const uint8_t priorities[],
1978 		    uint16_t nb_links);
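
/*
 * Usage sketch (illustrative only): link two queues to a port with distinct
 * servicing priorities; the queue ids and priorities are assumptions.
 *
 *	const uint8_t queues[] = {0, 1};
 *	const uint8_t priorities[] = {
 *		RTE_EVENT_DEV_PRIORITY_HIGHEST,
 *		RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *
 *	if (rte_event_port_link(dev_id, port_id, queues, priorities, 2) != 2)
 *		rte_panic("queue to port linking failed\n");
 */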
1979 
1980 /**
1981  * Unlink multiple source event queues supplied in *queues* from the destination
1982  * event port designated by its *port_id* on the event device designated
1983  * by its *dev_id*.
1984  *
 * The unlink call issues an async request to disable the event port *port_id*
 * from receiving events from the specified event queue(s) supplied in *queues*.
 * Event queue(s) to event port unlink establishment can be changed at runtime
 * without re-configuring the device.
1989  *
1990  * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
1991  * than or equal to one, this function unlinks the event queues from the default
1992  * profile identifier i.e. profile 0 of the event port.
1993  *
1994  * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
1995  *
1996  * @param dev_id
1997  *   The identifier of the device.
1998  *
1999  * @param port_id
2000  *   Event port identifier to select the destination port to unlink.
2001  *
2002  * @param queues
2003  *   Points to an array of *nb_unlinks* event queues to be unlinked
2004  *   from the event port.
2005  *   NULL value is allowed, in which case this function unlinks all the
2006  *   event queue(s) from the event port *port_id*.
2007  *
2008  * @param nb_unlinks
2009  *   The number of unlinks to establish. This parameter is ignored if queues is
2010  *   NULL.
2011  *
2012  * @return
2013  * The number of unlinks successfully requested. The return value can be less
2014  * than the value of the *nb_unlinks* parameter when the implementation has the
2015  * limitation on specific queue to port unlink establishment or
2016  * if invalid parameters are specified.
2017  * If the return value is less than *nb_unlinks*, the remaining queues at the
2018  * end of queues[] are not unlinked, and the caller has to take care of them.
 * If the return value is less than *nb_unlinks*, the implementation shall
 * update rte_errno accordingly. Possible rte_errno values are:
2021  * (EINVAL) Invalid parameter
2022  */
2023 int
2024 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
2025 		      uint8_t queues[], uint16_t nb_unlinks);
2026 
2027 /**
2028  * Link multiple source event queues supplied in *queues* to the destination
2029  * event port designated by its *port_id* with associated profile identifier
2030  * supplied in *profile_id* with service priorities supplied in *priorities*
2031  * on the event device designated by its *dev_id*.
2032  *
 * If *profile_id* is set to 0, the links created by the call ``rte_event_port_link``
 * will be overwritten.
2035  *
2036  * Event ports by default use profile_id 0 unless it is changed using the
2037  * call ``rte_event_port_profile_switch()``.
2038  *
 * The link establishment shall enable the event port *port_id* to receive
 * events from the specified event queue(s) supplied in *queues*
 *
 * An event queue may link to one or more event ports.
 * The number of links that can be established from an event queue to an event
 * port is implementation defined.
2045  *
2046  * Event queue(s) to event port link establishment can be changed at runtime
2047  * without re-configuring the device to support scaling and to reduce the
2048  * latency of critical work by establishing the link with more event ports
2049  * at runtime.
2050  *
2051  * @param dev_id
2052  *   The identifier of the device.
2053  *
2054  * @param port_id
2055  *   Event port identifier to select the destination port to link.
2056  *
2057  * @param queues
2058  *   Points to an array of *nb_links* event queues to be linked
2059  *   to the event port.
 *   NULL value is allowed, in which case this function links all the configured
 *   event queues *nb_event_queues*, which were previously supplied to
 *   rte_event_dev_configure(), to the event port *port_id*
2063  *
2064  * @param priorities
2065  *   Points to an array of *nb_links* service priorities associated with each
2066  *   event queue link to event port.
2067  *   The priority defines the event port's servicing priority for
2068  *   event queue, which may be ignored by an implementation.
 *   The requested priority should be in the range of
2070  *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
2071  *   The implementation shall normalize the requested priority to
2072  *   implementation supported priority value.
2073  *   NULL value is allowed, in which case this function links the event queues
2074  *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
2075  *
2076  * @param nb_links
2077  *   The number of links to establish. This parameter is ignored if queues is
2078  *   NULL.
2079  *
2080  * @param profile_id
2081  *   The profile identifier associated with the links between event queues and
2082  *   event port. Should be less than the max capability reported by
2083  *   ``rte_event_dev_info::max_profiles_per_port``
2084  *
2085  * @return
2086  * The number of links actually established. The return value can be less than
2087  * the value of the *nb_links* parameter when the implementation has the
2088  * limitation on specific queue to port link establishment or if invalid
2089  * parameters are specified in *queues*
2090  * If the return value is less than *nb_links*, the remaining links at the end
2091  * of link[] are not established, and the caller has to take care of them.
 * If the return value is less than *nb_links*, the implementation shall update
 * rte_errno accordingly. Possible rte_errno values are:
 * (EDQUOT) Quota exceeded (the application tried to link a queue configured with
 *  RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
 * (EINVAL) Invalid parameter
2097  *
2098  */
2099 __rte_experimental
2100 int
2101 rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
2102 				 const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);
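
/*
 * Usage sketch (illustrative only): populate two link profiles and switch
 * between them at runtime; the queue ids and profile layout are assumptions.
 *
 *	const uint8_t rx_queues[] = {0};
 *	const uint8_t tx_queues[] = {1};
 *
 *	rte_event_port_profile_links_set(dev_id, port_id, rx_queues, NULL, 1, 0);
 *	rte_event_port_profile_links_set(dev_id, port_id, tx_queues, NULL, 1, 1);
 *
 *	// Later, on the datapath, switch the port to profile 1:
 *	rte_event_port_profile_switch(dev_id, port_id, 1);
 */
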
2103 
2104 /**
2105  * Unlink multiple source event queues supplied in *queues* that belong to profile
2106  * designated by *profile_id* from the destination event port designated by its
2107  * *port_id* on the event device designated by its *dev_id*.
2108  *
 * If *profile_id* is set to 0, i.e. the default profile, then this function
 * will act as ``rte_event_port_unlink``.
2111  *
 * The unlink call issues an async request to disable the event port *port_id*
 * from receiving events from the specified event queue(s) supplied in *queues*.
 * Event queue(s) to event port unlink establishment can be changed at runtime
 * without re-configuring the device.
2116  *
2117  * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
2118  *
2119  * @param dev_id
2120  *   The identifier of the device.
2121  *
2122  * @param port_id
2123  *   Event port identifier to select the destination port to unlink.
2124  *
2125  * @param queues
2126  *   Points to an array of *nb_unlinks* event queues to be unlinked
2127  *   from the event port.
2128  *   NULL value is allowed, in which case this function unlinks all the
2129  *   event queue(s) from the event port *port_id*.
2130  *
2131  * @param nb_unlinks
2132  *   The number of unlinks to establish. This parameter is ignored if queues is
2133  *   NULL.
2134  *
2135  * @param profile_id
2136  *   The profile identifier associated with the links between event queues and
2137  *   event port. Should be less than the max capability reported by
2138  *   ``rte_event_dev_info::max_profiles_per_port``
2139  *
2140  * @return
2141  * The number of unlinks successfully requested. The return value can be less
2142  * than the value of the *nb_unlinks* parameter when the implementation has the
2143  * limitation on specific queue to port unlink establishment or
2144  * if invalid parameters are specified.
2145  * If the return value is less than *nb_unlinks*, the remaining queues at the
2146  * end of queues[] are not unlinked, and the caller has to take care of them.
 * If the return value is less than *nb_unlinks*, the implementation shall
 * update rte_errno accordingly. Possible rte_errno values are:
2149  * (EINVAL) Invalid parameter
2150  *
2151  */
2152 __rte_experimental
2153 int
2154 rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
2155 			      uint16_t nb_unlinks, uint8_t profile_id);
2156 
2157 /**
2158  * Returns the number of unlinks in progress.
2159  *
2160  * This function provides the application with a method to detect when an
2161  * unlink has been completed by the implementation.
2162  *
2163  * @see rte_event_port_unlink() to issue unlink requests.
2164  *
2165  * @param dev_id
2166  *   The identifier of the device.
2167  *
2168  * @param port_id
2169  *   Event port identifier to select port to check for unlinks in progress.
2170  *
2171  * @return
2172  * The number of unlinks that are in progress. A return of zero indicates that
2173  * there are no outstanding unlink requests. A positive return value indicates
2174  * the number of unlinks that are in progress, but are not yet complete.
2175  * A negative return value indicates an error, -EINVAL indicates an invalid
2176  * parameter passed for *dev_id* or *port_id*.
2177  */
2178 int
2179 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);
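
/*
 * Usage sketch (illustrative only): request an unlink of all queues from a
 * port, then busy-wait until the device reports no unlinks in progress.
 *
 *	rte_event_port_unlink(dev_id, port_id, NULL, 0);
 *	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *		rte_pause();
 */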
2180 
2181 /**
 * Retrieve the list of source event queues and their associated service
 * priorities linked to the destination event port designated by its *port_id*
 * on the event device designated by its *dev_id*.
2185  *
2186  * @param dev_id
2187  *   The identifier of the device.
2188  *
2189  * @param port_id
2190  *   Event port identifier.
2191  *
2192  * @param[out] queues
2193  *   Points to an array of *queues* for output.
2194  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
2195  *   store the event queue(s) linked with event port *port_id*
2196  *
2197  * @param[out] priorities
2198  *   Points to an array of *priorities* for output.
2199  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
2200  *   store the service priority associated with each event queue linked
2201  *
2202  * @return
2203  * The number of links established on the event port designated by its
2204  *  *port_id*.
2205  * - <0 on failure.
2206  */
2207 int
2208 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
2209 			 uint8_t queues[], uint8_t priorities[]);
2210 
2211 /**
 * Retrieve the list of source event queues and their service priorities
 * associated with a *profile_id* and linked to the destination event port
 * designated by its *port_id* on the event device designated by its *dev_id*.
2215  *
2216  * @param dev_id
2217  *   The identifier of the device.
2218  *
2219  * @param port_id
2220  *   Event port identifier.
2221  *
2222  * @param[out] queues
2223  *   Points to an array of *queues* for output.
2224  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
2225  *   store the event queue(s) linked with event port *port_id*
2226  *
2227  * @param[out] priorities
2228  *   Points to an array of *priorities* for output.
2229  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
2230  *   store the service priority associated with each event queue linked
2231  *
2232  * @param profile_id
2233  *   The profile identifier associated with the links between event queues and
2234  *   event port. Should be less than the max capability reported by
2235  *   ``rte_event_dev_info::max_profiles_per_port``
2236  *
2237  * @return
2238  * The number of links established on the event port designated by its
2239  *  *port_id*.
2240  * - <0 on failure.
2241  */
2242 __rte_experimental
2243 int
2244 rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
2245 				 uint8_t priorities[], uint8_t profile_id);
2246 
2247 /**
 * Retrieve the service ID of the event dev. If the event dev doesn't use
 * an rte_service function, this function returns -ESRCH.
2250  *
2251  * @param dev_id
2252  *   The identifier of the device.
2253  *
2254  * @param [out] service_id
2255  *   A pointer to a uint32_t, to be filled in with the service id.
2256  *
2257  * @return
2258  *   - 0: Success
2259  *   - <0: Error code on failure, if the event dev doesn't use a rte_service
2260  *   function, this function returns -ESRCH.
2261  */
2262 int
2263 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
2264 
2265 /**
2266  * Dump internal information about *dev_id* to the FILE* provided in *f*.
2267  *
2268  * @param dev_id
2269  *   The identifier of the device.
2270  *
2271  * @param f
2272  *   A pointer to a file for output
2273  *
2274  * @return
2275  *   - 0: on success
2276  *   - <0: on failure.
2277  */
2278 int
2279 rte_event_dev_dump(uint8_t dev_id, FILE *f);
2280 
2281 /** Maximum name length for extended statistics counters */
2282 #define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
2283 
2284 /**
2285  * Selects the component of the eventdev to retrieve statistics from.
2286  */
2287 enum rte_event_dev_xstats_mode {
2288 	RTE_EVENT_DEV_XSTATS_DEVICE,
2289 	RTE_EVENT_DEV_XSTATS_PORT,
2290 	RTE_EVENT_DEV_XSTATS_QUEUE,
2291 };
2292 
2293 /**
2294  * A name-key lookup element for extended statistics.
2295  *
 * This structure is used to map between names and ID numbers
 * for extended eventdev statistics.
2298  */
2299 struct rte_event_dev_xstats_name {
2300 	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
2301 };
2302 
2303 /**
2304  * Retrieve names of extended statistics of an event device.
2305  *
2306  * @param dev_id
2307  *   The identifier of the event device.
2308  * @param mode
2309  *   The mode of statistics to retrieve. Choices include the device statistics,
2310  *   port statistics or queue statistics.
2311  * @param queue_port_id
2312  *   Used to specify the port or queue number in queue or port mode, and is
2313  *   ignored in device mode.
2314  * @param[out] xstats_names
 *   Block of memory to insert names into. Must have capacity for at least
 *   *size* entries. If set to NULL, the function returns the required capacity.
 * @param[out] ids
 *   Block of memory to insert ids into. Must have capacity for at least
 *   *size* entries. If set to NULL, the function returns the required capacity.
 *   The id values returned can be passed to *rte_event_dev_xstats_get* to
 *   select statistics.
2321  * @param size
2322  *   Capacity of xstats_names (number of names).
2323  * @return
2324  *   - positive value lower or equal to size: success. The return value
2325  *     is the number of entries filled in the stats table.
2326  *   - positive value higher than size: error, the given statistics table
2327  *     is too small. The return value corresponds to the size that should
2328  *     be given to succeed. The entries in the table are not valid and
2329  *     shall not be used by the caller.
2330  *   - negative value on error:
2331  *        -ENODEV for invalid *dev_id*
2332  *        -EINVAL for invalid mode, queue port or id parameters
2333  *        -ENOTSUP if the device doesn't support this function.
2334  */
2335 int
2336 rte_event_dev_xstats_names_get(uint8_t dev_id,
2337 			       enum rte_event_dev_xstats_mode mode,
2338 			       uint8_t queue_port_id,
2339 			       struct rte_event_dev_xstats_name *xstats_names,
2340 			       uint64_t *ids,
2341 			       unsigned int size);
2342 
2343 /**
2344  * Retrieve extended statistics of an event device.
2345  *
2346  * @param dev_id
2347  *   The identifier of the device.
2348  * @param mode
2349  *  The mode of statistics to retrieve. Choices include the device statistics,
2350  *  port statistics or queue statistics.
2351  * @param queue_port_id
2352  *   Used to specify the port or queue number in queue or port mode, and is
2353  *   ignored in device mode.
2354  * @param ids
 *   The id numbers of the stats to get. The ids can be taken from the stat
 *   position in the stat list from rte_event_dev_xstats_names_get(), or
 *   by using rte_event_dev_xstats_by_name_get().
2358  * @param[out] values
 *   The values for each stat requested by ID.
2360  * @param n
2361  *   The number of stats requested
2362  * @return
2363  *   - positive value: number of stat entries filled into the values array
2364  *   - negative value on error:
2365  *        -ENODEV for invalid *dev_id*
2366  *        -EINVAL for invalid mode, queue port or id parameters
2367  *        -ENOTSUP if the device doesn't support this function.
2368  */
2369 int
2370 rte_event_dev_xstats_get(uint8_t dev_id,
2371 			 enum rte_event_dev_xstats_mode mode,
2372 			 uint8_t queue_port_id,
2373 			 const uint64_t ids[],
2374 			 uint64_t values[], unsigned int n);
2375 
2376 /**
2377  * Retrieve the value of a single stat by requesting it by name.
2378  *
2379  * @param dev_id
2380  *   The identifier of the device
2381  * @param name
2382  *   The stat name to retrieve
2383  * @param[out] id
 *   If non-NULL, the numerical id of the stat will be returned, so that further
 *   requests for the stat can be made using rte_event_dev_xstats_get(), which
 *   will be faster as it doesn't need to scan a list of names for the stat.
2387  *   If the stat cannot be found, the id returned will be (unsigned)-1.
2388  * @return
2389  *   - positive value or zero: the stat value
2390  *   - negative value: -EINVAL if stat not found, -ENOTSUP if not supported.
2391  */
2392 uint64_t
2393 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
2394 				 uint64_t *id);
2395 
2396 /**
2397  * Reset the values of the xstats of the selected component in the device.
2398  *
2399  * @param dev_id
2400  *   The identifier of the device
2401  * @param mode
2402  *   The mode of the statistics to reset. Choose from device, queue or port.
2403  * @param queue_port_id
2404  *   The queue or port to reset. 0 and positive values select ports and queues,
2405  *   while -1 indicates all ports or queues.
2406  * @param ids
2407  *   Selects specific statistics to be reset. When NULL, all statistics selected
2408  *   by *mode* will be reset. If non-NULL, must point to array of at least
2409  *   *nb_ids* size.
2410  * @param nb_ids
2411  *   The number of ids available from the *ids* array. Ignored when ids is NULL.
2412  * @return
2413  *   - zero: successfully reset the statistics to zero
2414  *   - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported.
2415  */
2416 int
2417 rte_event_dev_xstats_reset(uint8_t dev_id,
2418 			   enum rte_event_dev_xstats_mode mode,
2419 			   int16_t queue_port_id,
2420 			   const uint64_t ids[],
2421 			   uint32_t nb_ids);
2422 
2423 /**
2424  * Trigger the eventdev self test.
2425  *
2426  * @param dev_id
2427  *   The identifier of the device
2428  * @return
2429  *   - 0: Selftest successful
2430  *   - -ENOTSUP if the device doesn't support selftest
2431  *   - other values < 0 on failure.
2432  */
2433 int rte_event_dev_selftest(uint8_t dev_id);
2434 
2435 /**
2436  * Get the memory required per event vector based on the number of elements per
2437  * vector.
2438  * This should be used to create the mempool that holds the event vectors.
2439  *
2440  * @param name
2441  *   The name of the vector pool.
2442  * @param n
 *   The number of event vectors in the pool.
2444  * @param cache_size
2445  *   Size of the per-core object cache. See rte_mempool_create() for
2446  *   details.
2447  * @param nb_elem
2448  *   The number of elements that a single event vector should be able to hold.
2449  * @param socket_id
2450  *   The socket identifier where the memory should be allocated. The
2451  *   value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
2452  *   reserved zone
2453  *
2454  * @return
2455  *   The pointer to the newly allocated mempool, on success. NULL on error
2456  *   with rte_errno set appropriately. Possible rte_errno values include:
2457  *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
2458  *    - E_RTE_SECONDARY - function was called from a secondary process instance
2459  *    - EINVAL - cache size provided is too large, or priv_size is not aligned.
2460  *    - ENOSPC - the maximum number of memzones has already been allocated
2461  *    - EEXIST - a memzone with the same name already exists
2462  *    - ENOMEM - no appropriate memory area found in which to create memzone
2463  *    - ENAMETOOLONG - mempool name requested is too long.
2464  */
2465 struct rte_mempool *
2466 rte_event_vector_pool_create(const char *name, unsigned int n,
2467 			     unsigned int cache_size, uint16_t nb_elem,
2468 			     int socket_id);
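
/*
 * Usage sketch (illustrative only): create a pool of 16K event vectors, each
 * able to hold up to 64 elements; the pool name and sizes are assumptions.
 *
 *	struct rte_mempool *vec_pool;
 *
 *	vec_pool = rte_event_vector_pool_create("evt_vec_pool", 16384, 0,
 *						64, rte_socket_id());
 *	if (vec_pool == NULL)
 *		rte_panic("cannot create event vector pool\n");
 */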
2469 
2470 #include <rte_eventdev_core.h>
2471 
2472 static __rte_always_inline uint16_t
2473 __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2474 			  const struct rte_event ev[], uint16_t nb_events,
2475 			  const event_enqueue_burst_t fn)
2476 {
2477 	const struct rte_event_fp_ops *fp_ops;
2478 	void *port;
2479 
2480 	fp_ops = &rte_event_fp_ops[dev_id];
2481 	port = fp_ops->data[port_id];
2482 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2483 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2484 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2485 		rte_errno = EINVAL;
2486 		return 0;
2487 	}
2488 
2489 	if (port == NULL) {
2490 		rte_errno = EINVAL;
2491 		return 0;
2492 	}
2493 #endif
2494 	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
2495 	/*
	 * Allow zero-cost invocation of the non-burst enqueue routine when
	 * the application passes nb_events as a compile-time constant of 1.
2498 	 */
2499 	if (nb_events == 1)
2500 		return (fp_ops->enqueue)(port, ev);
2501 	else
2502 		return fn(port, ev, nb_events);
2503 }
2504 
2505 /**
 * Enqueue a burst of event objects or a single event object supplied in the
 * *rte_event* structure on an event device designated by its *dev_id* through
 * the event port specified by *port_id*. Each event object specifies the event
 * queue on which it will be enqueued.
2510  *
2511  * The *nb_events* parameter is the number of event objects to enqueue which are
2512  * supplied in the *ev* array of *rte_event* structure.
2513  *
2514  * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
2515  * enqueued to the same port that their associated events were dequeued from.
2516  *
2517  * The rte_event_enqueue_burst() function returns the number of
 * event objects it actually enqueued. A return value equal to *nb_events*
2519  * means that all event objects have been enqueued.
2520  *
2521  * @param dev_id
2522  *   The identifier of the device.
2523  * @param port_id
2524  *   The identifier of the event port.
2525  * @param ev
2526  *   Points to an array of *nb_events* objects of type *rte_event* structure
2527  *   which contain the event object enqueue operations to be processed.
2528  * @param nb_events
2529  *   The number of event objects to enqueue, typically number of
2530  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2531  *   available for this port.
2532  *
2533  * @return
2534  *   The number of event objects actually enqueued on the event device. The
2535  *   return value can be less than the value of the *nb_events* parameter when
2536  *   the event devices queue is full or if invalid parameters are specified in a
2537  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2538  *   events at the end of ev[] are not consumed and the caller has to take care
2539  *   of them, and rte_errno is set accordingly. Possible errno values include:
2540  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2541  *              ID is invalid, or an event's sched type doesn't match the
2542  *              capabilities of the destination queue.
2543  *   - ENOSPC   The event port was backpressured and unable to enqueue
2544  *              one or more events. This error code is only applicable to
2545  *              closed systems.
2546  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2547  */
2548 static inline uint16_t
2549 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2550 			const struct rte_event ev[], uint16_t nb_events)
2551 {
2552 	const struct rte_event_fp_ops *fp_ops;
2553 
2554 	fp_ops = &rte_event_fp_ops[dev_id];
2555 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2556 					 fp_ops->enqueue_burst);
2557 }
2558 
/**
 * Enqueue a burst of event objects, all of operation type *RTE_EVENT_OP_NEW*,
 * on an event device designated by its *dev_id* through the event port
 * specified by *port_id*.
 *
 * Provides the same functionality as rte_event_enqueue_burst(), except that
 * the application can use this API only when all objects in the burst contain
 * enqueue operations of the type *RTE_EVENT_OP_NEW*. This specialized
 * function provides an additional hint to the PMD, which can optimize the
 * enqueue if possible.
 *
 * The rte_event_enqueue_new_burst() result is undefined if the burst contains
 * any event object of operation type != RTE_EVENT_OP_NEW.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param ev
 *   Points to an array of *nb_events* objects of type *rte_event* structure
 *   which contain the event object enqueue operations to be processed.
 * @param nb_events
 *   The number of event objects to enqueue, typically up to the enqueue
 *   depth available for this port, as returned by rte_event_port_attr_get()
 *   with RTE_EVENT_PORT_ATTR_ENQ_DEPTH.
 *
 * @return
 *   The number of event objects actually enqueued on the event device. The
 *   return value can be less than the value of the *nb_events* parameter when
 *   the event device's queue is full or if invalid parameters are specified
 *   in an *rte_event*. If the return value is less than *nb_events*, the
 *   remaining events at the end of ev[] are not consumed and the caller has
 *   to take care of them, and rte_errno is set accordingly. Possible errno
 *   values include:
 *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
 *              ID is invalid, or an event's sched type doesn't match the
 *              capabilities of the destination queue.
 *   - ENOSPC   The event port was backpressured and unable to enqueue
 *              one or more events. This error code is only applicable to
 *              closed systems.
 * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
 * @see rte_event_enqueue_burst()
 */
static inline uint16_t
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
			    const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
					 fp_ops->enqueue_new_burst);
}

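/*
 * Usage sketch (editor's illustration, not part of the API): a producer
 * stage wrapping freshly received mbufs as new events. BURST_SIZE, eth_port,
 * qid and the surrounding variables are hypothetical application code, and
 * partial-enqueue handling is omitted for brevity (see the sketch following
 * rte_event_enqueue_burst() above).
 *
 *	struct rte_mbuf *mbufs[BURST_SIZE];
 *	struct rte_event ev[BURST_SIZE];
 *	uint16_t i, nb_rx;
 *
 *	nb_rx = rte_eth_rx_burst(eth_port, 0, mbufs, BURST_SIZE);
 *	for (i = 0; i < nb_rx; i++) {
 *		ev[i] = (struct rte_event) {
 *			.op = RTE_EVENT_OP_NEW,
 *			.queue_id = qid,
 *			.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *			.event_type = RTE_EVENT_TYPE_ETHDEV,
 *			.flow_id = mbufs[i]->hash.rss,
 *			.mbuf = mbufs[i],
 *		};
 *	}
 *	rte_event_enqueue_new_burst(dev_id, port_id, ev, nb_rx);
 */
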
/**
 * Enqueue a burst of event objects, all of operation type
 * *RTE_EVENT_OP_FORWARD*, on an event device designated by its *dev_id*
 * through the event port specified by *port_id*.
 *
 * Provides the same functionality as rte_event_enqueue_burst(), except that
 * the application can use this API only when all objects in the burst contain
 * enqueue operations of the type *RTE_EVENT_OP_FORWARD*. This specialized
 * function provides an additional hint to the PMD, which can optimize the
 * enqueue if possible.
 *
 * The rte_event_enqueue_forward_burst() result is undefined if the burst
 * contains any event object of operation type != RTE_EVENT_OP_FORWARD.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param ev
 *   Points to an array of *nb_events* objects of type *rte_event* structure
 *   which contain the event object enqueue operations to be processed.
 * @param nb_events
 *   The number of event objects to enqueue, typically up to the enqueue
 *   depth available for this port, as returned by rte_event_port_attr_get()
 *   with RTE_EVENT_PORT_ATTR_ENQ_DEPTH.
 *
 * @return
 *   The number of event objects actually enqueued on the event device. The
 *   return value can be less than the value of the *nb_events* parameter when
 *   the event device's queue is full or if invalid parameters are specified
 *   in an *rte_event*. If the return value is less than *nb_events*, the
 *   remaining events at the end of ev[] are not consumed and the caller has
 *   to take care of them, and rte_errno is set accordingly. Possible errno
 *   values include:
 *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
 *              ID is invalid, or an event's sched type doesn't match the
 *              capabilities of the destination queue.
 *   - ENOSPC   The event port was backpressured and unable to enqueue
 *              one or more events. This error code is only applicable to
 *              closed systems.
 * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
 * @see rte_event_enqueue_burst()
 */
static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
				const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
					 fp_ops->enqueue_forward_burst);
}

/**
 * Dequeue a burst of event objects, or a single event object, from the event
 * port designated by its *port_id*, on an event device designated by its
 * *dev_id*.
 *
 * rte_event_dequeue_burst() does not dictate the specifics of the scheduling
 * algorithm, as each eventdev driver may have different criteria for
 * scheduling an event. However, in general, from an application perspective
 * the scheduler may use the following scheme to dispatch an event to the port.
 *
 * 1) Selection of an event queue based on
 *   a) The list of event queues linked to the event port.
 *   b) If the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability, then
 *   event queue selection from the list is based on event queue priority
 *   relative to the other event queues, supplied as *priority* in
 *   rte_event_queue_setup().
 *   c) If the device has the RTE_EVENT_DEV_CAP_EVENT_QOS capability, then
 *   event queue selection from the list is based on the event priority
 *   supplied as *priority* in rte_event_enqueue_burst().
 * 2) Selection of an event based on
 *   a) The number of flows available in the selected event queue.
 *   b) The schedule type method associated with the event.
 *
 * The *nb_events* parameter is the maximum number of event objects to
 * dequeue, which are returned in the *ev* array of *rte_event* structures.
 *
 * The rte_event_dequeue_burst() function returns the number of event objects
 * it actually dequeued. A return value equal to *nb_events* means that all
 * event objects have been dequeued.
 *
 * The number of events dequeued is the number of scheduler contexts held by
 * this port. These contexts are automatically released in the next
 * rte_event_dequeue_burst() invocation if the port supports implicit
 * releases; alternatively, invoking rte_event_enqueue_burst() with the
 * RTE_EVENT_OP_RELEASE operation can be used to release the contexts early.
 *
 * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
 * enqueued to the same port that their associated events were dequeued from.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param[out] ev
 *   Points to an array of *nb_events* objects of type *rte_event* structure
 *   for output, to be populated with the dequeued event objects.
 * @param nb_events
 *   The maximum number of event objects to dequeue, typically up to the
 *   dequeue depth available for this port, as returned by
 *   rte_event_port_dequeue_depth().
 * @param timeout_ticks
 *   - 0, no-wait mode: return immediately if there is no event.
 *   - >0, wait for an event. If the device is configured with
 *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT, then this function will wait until
 *   at least one event is available or *timeout_ticks* time has elapsed;
 *   if the device is not configured with
 *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT, then this function will wait until
 *   an event is available or the *dequeue_timeout_ns* period, previously
 *   supplied to rte_event_dev_configure(), has elapsed.
 *
 * @return
 * The number of event objects actually dequeued from the port. The return
 * value can be less than the value of the *nb_events* parameter when fewer
 * than *nb_events* events are available on the port's queues.
 *
 * @see rte_event_port_dequeue_depth()
 */
static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
			uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Validate the IDs before they are used to index the ops tables. */
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
	/*
	 * Invoke the single-event routine at zero cost when the application
	 * supplies nb_events as a compile-time constant of 1.
	 */
	if (nb_events == 1)
		return (fp_ops->dequeue)(port, ev, timeout_ticks);
	else
		return (fp_ops->dequeue_burst)(port, ev, nb_events,
					       timeout_ticks);
}

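/*
 * Usage sketch (editor's illustration, not part of the API): a worker loop
 * that dequeues a burst, processes it, and forwards the events to the next
 * pipeline stage on the same port they were dequeued from. done, timeout,
 * next_qid, BURST_SIZE and process_event() are hypothetical application
 * code; partial-enqueue retry is omitted for brevity.
 *
 *	struct rte_event ev[BURST_SIZE];
 *	uint16_t i, n;
 *
 *	while (!done) {
 *		n = rte_event_dequeue_burst(dev_id, port_id, ev,
 *					    BURST_SIZE, timeout);
 *		for (i = 0; i < n; i++) {
 *			process_event(&ev[i]);
 *			ev[i].op = RTE_EVENT_OP_FORWARD;
 *			ev[i].queue_id = next_qid;
 *		}
 *		if (n > 0)
 *			rte_event_enqueue_forward_burst(dev_id, port_id,
 *							ev, n);
 *	}
 */
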
#define RTE_EVENT_DEV_MAINT_OP_FLUSH          (1 << 0)
/**< Force an immediate flush of any buffered events in the port,
 * potentially at the cost of additional overhead.
 *
 * @see rte_event_maintain()
 */

/**
 * Maintain an event device.
 *
 * This function is only relevant for event devices which do not have
 * the @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE flag set. Such devices
 * require an application thread using a particular port to
 * periodically call rte_event_maintain() on that port during periods
 * in which it is neither attempting to enqueue events to nor dequeue
 * events from the port. rte_event_maintain() is a low-overhead
 * function and should be called at a high rate (e.g., in the
 * application's poll loop).
 *
 * No port may be left unmaintained.
 *
 * At the application thread's convenience, rte_event_maintain() may
 * (but is not required to) be called even during periods when enqueue
 * or dequeue functions are being called, at the cost of a slight
 * increase in overhead.
 *
 * rte_event_maintain() may be called on event devices which have set
 * @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, in which case it is a
 * no-operation.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param op
 *   0, or @ref RTE_EVENT_DEV_MAINT_OP_FLUSH.
 * @return
 *  - 0 on success.
 *  - -EINVAL if *dev_id*, *port_id*, or *op* is invalid.
 *
 * @see RTE_EVENT_DEV_CAP_MAINTENANCE_FREE
 */
static inline int
rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Validate the IDs before they are used to index the ops tables. */
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;
#endif
	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (port == NULL)
		return -EINVAL;

	if (op & ~RTE_EVENT_DEV_MAINT_OP_FLUSH)
		return -EINVAL;
#endif
	rte_eventdev_trace_maintain(dev_id, port_id, op);

	if (fp_ops->maintain != NULL)
		fp_ops->maintain(port, op);

	return 0;
}

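/*
 * Usage sketch (editor's illustration, not part of the API): keeping a port
 * maintained on a device without RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, from a
 * thread that only occasionally enqueues and never dequeues. done,
 * have_work() and submit_events() are hypothetical application code.
 *
 *	while (!done) {
 *		if (have_work())
 *			submit_events(dev_id, port_id);	// enqueues internally
 *		else
 *			rte_event_maintain(dev_id, port_id, 0);
 *	}
 */
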
/**
 * Change the active profile on an event port.
 *
 * This function is used to change the current active profile on an event port
 * when multiple link profiles have been configured on it with
 * ``rte_event_port_profile_links_set``.
 *
 * On the subsequent ``rte_event_dequeue_burst`` call, only the event queues
 * that were associated with the newly active profile will participate in
 * scheduling.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param profile_id
 *   The identifier of the profile.
 * @return
 *  - 0 on success.
 *  - -EINVAL if *dev_id*, *port_id*, or *profile_id* is invalid.
 */
static inline int
rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Validate the IDs before they are used to index the ops tables. */
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;
#endif
	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (port == NULL)
		return -EINVAL;

	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
		return -EINVAL;
#endif
	rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);

	return fp_ops->profile_switch(port, profile_id);
}

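/*
 * Usage sketch (editor's illustration, not part of the API): configure two
 * link profiles for a port at setup time, then switch between them on the
 * datapath. The queue IDs, profile numbering and default (NULL) priorities
 * are hypothetical.
 *
 *	uint8_t q_hi[] = { 0 };
 *	uint8_t q_lo[] = { 1 };
 *
 *	rte_event_port_profile_links_set(dev_id, port_id, q_hi, NULL, 1, 0);
 *	rte_event_port_profile_links_set(dev_id, port_id, q_lo, NULL, 1, 1);
 *
 *	...
 *
 *	rte_event_port_profile_switch(dev_id, port_id, 1);
 */
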
#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENTDEV_H_ */