1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc.
3  * Copyright(c) 2016-2018 Intel Corporation.
4  * Copyright 2016 NXP
5  * All rights reserved.
6  */
7 
8 #ifndef _RTE_EVENTDEV_H_
9 #define _RTE_EVENTDEV_H_
10 
11 /**
12  * @file
13  *
14  * RTE Event Device API
15  *
16  * In a polling model, lcores poll ethdev ports and associated Rx queues
17  * directly to look for packets. In an event-driven model, by contrast, lcores
18  * call a scheduler that selects packets for them based on programmer-specified
19  * criteria. The eventdev library adds support for this event-driven programming
20  * model, which offers applications automatic multicore scaling, dynamic load
21  * balancing, pipelining, packet ingress order maintenance and synchronization
22  * services to simplify application packet processing.
23  *
24  * The Event Device API is composed of two parts:
25  *
26  * - The application-oriented Event API that includes functions to setup
27  *   an event device (configure it, setup its queues, ports and start it), to
28  *   establish the link between queues to port and to receive events, and so on.
29  *
30  * - The driver-oriented Event API that exports a function allowing
31  *   an event Poll Mode Driver (PMD) to register itself as
32  *   an event device driver.
33  *
34  * Event device components:
35  *
36  *                     +-----------------+
37  *                     | +-------------+ |
38  *        +-------+    | |    flow 0   | |
39  *        |Packet |    | +-------------+ |
40  *        |event  |    | +-------------+ |
41  *        |       |    | |    flow 1   | |port_link(port0, queue0)
42  *        +-------+    | +-------------+ |     |     +--------+
43  *        +-------+    | +-------------+ o-----v-----o        |dequeue +------+
44  *        |Crypto |    | |    flow n   | |           | event  +------->|Core 0|
45  *        |work   |    | +-------------+ o----+      | port 0 |        |      |
46  *        |done ev|    |  event queue 0  |    |      +--------+        +------+
47  *        +-------+    +-----------------+    |
48  *        +-------+                           |
49  *        |Timer  |    +-----------------+    |      +--------+
50  *        |expiry |    | +-------------+ |    +------o        |dequeue +------+
51  *        |event  |    | |    flow 0   | o-----------o event  +------->|Core 1|
52  *        +-------+    | +-------------+ |      +----o port 1 |        |      |
53  *       Event enqueue | +-------------+ |      |    +--------+        +------+
54  *     o-------------> | |    flow 1   | |      |
55  *        enqueue(     | +-------------+ |      |
56  *        queue_id,    |                 |      |    +--------+        +------+
57  *        flow_id,     | +-------------+ |      |    |        |dequeue |Core 2|
58  *        sched_type,  | |    flow n   | o-----------o event  +------->|      |
59  *        event_type,  | +-------------+ |      |    | port 2 |        +------+
60  *        subev_type,  |  event queue 1  |      |    +--------+
61  *        event)       +-----------------+      |    +--------+
62  *                                              |    |        |dequeue +------+
63  *        +-------+    +-----------------+      |    | event  +------->|Core n|
64  *        |Core   |    | +-------------+ o-----------o port n |        |      |
65  *        |(SW)   |    | |    flow 0   | |      |    +--------+        +--+---+
66  *        |event  |    | +-------------+ |      |                         |
67  *        +-------+    | +-------------+ |      |                         |
68  *            ^        | |    flow 1   | |      |                         |
69  *            |        | +-------------+ o------+                         |
70  *            |        | +-------------+ |                                |
71  *            |        | |    flow n   | |                                |
72  *            |        | +-------------+ |                                |
73  *            |        |  event queue n  |                                |
74  *            |        +-----------------+                                |
75  *            |                                                           |
76  *            +-----------------------------------------------------------+
77  *
78  * Event device: A hardware or software-based event scheduler.
79  *
80  * Event: A unit of scheduling that encapsulates a packet or another datatype,
81  * such as a SW-generated event from the CPU, a crypto work completion
82  * notification or a timer expiry notification, as well as metadata.
83  * The metadata includes flow ID, scheduling type, event priority, event_type,
84  * sub_event_type etc.
85  *
86  * Event queue: A queue containing events that are scheduled by the event dev.
87  * An event queue contains events of different flows associated with scheduling
88  * types, such as atomic, ordered, or parallel.
89  *
90  * Event port: An application's interface into the event dev for enqueue and
91  * dequeue operations. Each event port can be linked with one or more
92  * event queues for dequeue operations.
93  *
94  * By default, all the functions of the Event Device API exported by a PMD
95  * are lock-free functions which assume they are not invoked in parallel on
96  * different logical cores to work on the same target object. For instance,
97  * the dequeue function of a PMD cannot be invoked in parallel on two logical
98  * cores to operate on the same event port. Of course, this function
99  * can be invoked in parallel by different logical cores on different ports.
100  * It is the responsibility of the upper level application to enforce this rule.
101  *
102  * In all functions of the Event API, the Event device is
103  * designated by an integer >= 0 named the device identifier *dev_id*
104  *
105  * At the Event driver level, Event devices are represented by a generic
106  * data structure of type *rte_event_dev*.
107  *
108  * Event devices are dynamically registered during the PCI/SoC device probing
109  * phase performed at EAL initialization time.
110  * When an Event device is being probed, a *rte_event_dev* structure and
111  * a new device identifier are allocated for that device. Then, the
112  * event_dev_init() function supplied by the Event driver matching the probed
113  * device is invoked to properly initialize the device.
114  *
115  * The role of the device init function consists of resetting the hardware or
116  * software event driver implementations.
117  *
118  * If the device init operation is successful, the correspondence between
119  * the device identifier assigned to the new device and its associated
120  * *rte_event_dev* structure is effectively registered.
121  * Otherwise, both the *rte_event_dev* structure and the device identifier are
122  * freed.
123  *
124  * The functions exported by the application Event API to setup a device
125  * designated by its device identifier must be invoked in the following order:
126  *     - rte_event_dev_configure()
127  *     - rte_event_queue_setup()
128  *     - rte_event_port_setup()
129  *     - rte_event_port_link()
130  *     - rte_event_dev_start()
131  *
132  * Then, the application can invoke, in any order, the functions
133  * exported by the Event API to schedule events, dequeue events, enqueue events,
134  * link or unlink event queues to/from event ports, and so on.
135  *
136  * An application may use rte_event_[queue/port]_default_conf_get() to get the
137  * default configuration and then set up an event queue or event port by
138  * overriding a few default values.
139  *
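 * As an illustrative sketch (error handling omitted, dev_id assumed valid),
 * a minimal single-queue, single-port bring-up based on the defaults reported
 * by the device could look like:
 * \code{.c}
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config config = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	config.nb_event_queues = 1;
 *	config.nb_event_ports = 1;
 *	config.nb_events_limit = info.max_num_events;
 *	config.nb_event_queue_flows = info.max_event_queue_flows;
 *	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	config.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	rte_event_dev_configure(dev_id, &config);
 *	rte_event_queue_setup(dev_id, 0, NULL); // NULL: use the default queue config
 *	rte_event_port_setup(dev_id, 0, NULL);  // NULL: use the default port config
 *	rte_event_port_link(dev_id, 0, NULL, NULL, 0); // link port 0 to all queues
 *	rte_event_dev_start(dev_id);
 * \endcode
 *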
140  * If the application wants to change the configuration (i.e. call
141  * rte_event_dev_configure(), rte_event_queue_setup(), or
142  * rte_event_port_setup()), it must call rte_event_dev_stop() first to stop the
143  * device and then do the reconfiguration before calling rte_event_dev_start()
144  * again. The schedule, enqueue and dequeue functions should not be invoked
145  * when the device is stopped.
146  *
147  * Finally, an application can close an Event device by invoking the
148  * rte_event_dev_close() function.
149  *
150  * Each function of the application Event API invokes a specific function
151  * of the PMD that controls the target device designated by its device
152  * identifier.
153  *
154  * For this purpose, all device-specific functions of an Event driver are
155  * supplied through a set of pointers contained in a generic structure of type
156  * *event_dev_ops*.
157  * The address of the *event_dev_ops* structure is stored in the *rte_event_dev*
158  * structure by the device init function of the Event driver, which is
159  * invoked during the PCI/SoC device probing phase, as explained earlier.
160  *
161  * In other words, each function of the Event API simply retrieves the
162  * *rte_event_dev* structure associated with the device identifier and
163  * performs an indirect invocation of the corresponding driver function
164  * supplied in the *event_dev_ops* structure of the *rte_event_dev* structure.
165  *
166  * For performance reasons, the address of the fast-path functions of the
167  * Event driver is not contained in the *event_dev_ops* structure.
168  * Instead, they are directly stored at the beginning of the *rte_event_dev*
169  * structure to avoid an extra indirect memory access during their invocation.
170  *
171  * RTE event device drivers do not use interrupts for enqueue or dequeue
172  * operation. Instead, Event drivers export Poll-Mode enqueue and dequeue
173  * functions to applications.
174  *
175  * Events are injected into the event device through the *enqueue* operation by
176  * event producers in the system. Typical event producers are the ethdev
177  * subsystem generating packet events, the CPU (SW) generating events based
178  * on different stages of application processing, cryptodev generating
179  * crypto work completion notifications, etc.
180  *
181  * The *dequeue* operation gets one or more events from the event ports.
182  * The application processes the events and sends them to a downstream event
183  * queue through rte_event_enqueue_burst() if it is an intermediate stage of
184  * event processing; on the final stage, the application may use the Tx adapter
185  * API to maintain the ingress order and then send the packet/event on the wire.
186  *
187  * The point at which events are scheduled to ports depends on the device.
188  * For hardware devices, scheduling occurs asynchronously without any software
189  * intervention. Software schedulers can either be distributed
190  * (each worker thread schedules events to its own port) or centralized
191  * (a dedicated thread schedules to all ports). Distributed software schedulers
192  * perform the scheduling in rte_event_dequeue_burst(), whereas centralized
193  * scheduler logic needs a dedicated service core for scheduling.
194  * If the RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag is not set,
195  * the device is centralized and thus needs a dedicated scheduling
196  * thread that repeatedly calls a software specific scheduling function.
197  *
198  * An event-driven worker thread has the following typical workflow on the fastpath:
199  * \code{.c}
200  *	while (1) {
201  *		rte_event_dequeue_burst(...);
202  *		(event processing)
203  *		rte_event_enqueue_burst(...);
204  *	}
205  * \endcode
206  */
207 
208 #ifdef __cplusplus
209 extern "C" {
210 #endif
211 
212 #include <rte_compat.h>
213 #include <rte_common.h>
214 #include <rte_errno.h>
215 #include <rte_mbuf_pool_ops.h>
216 #include <rte_mempool.h>
217 
218 #include "rte_eventdev_trace_fp.h"
219 
220 struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
221 struct rte_event;
222 
223 /* Event device capability bitmap flags */
224 #define RTE_EVENT_DEV_CAP_QUEUE_QOS           (1ULL << 0)
225 /**< Event scheduling prioritization is based on the priority and weight
226  * associated with each event queue. Events from a queue with the highest
227  * priority are scheduled first. If the queues are of the same priority, the
228  * weights of the queues are used to select a queue in a weighted round robin fashion.
229  * Subsequent dequeue calls from an event port could see events from the same
230  * event queue, if the queue is configured with an affinity count. The affinity
231  * count is the number of subsequent dequeue calls in which an event port
232  * should use the same event queue if the queue is non-empty.
233  *
234  *  @see rte_event_queue_setup(), rte_event_queue_attr_set()
235  */
236 #define RTE_EVENT_DEV_CAP_EVENT_QOS           (1ULL << 1)
237 /**< Event scheduling prioritization is based on the priority associated with
238  *  each event. Priority of each event is supplied in *rte_event* structure
239  *  on each enqueue operation.
240  *
241  *  @see rte_event_enqueue_burst()
242  */
243 #define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED   (1ULL << 2)
244 /**< Event device operates in distributed scheduling mode.
245  * In distributed scheduling mode, event scheduling happens in HW or
246  * rte_event_dequeue_burst() or a combination of the two.
247  * If the flag is not set, then the eventdev is centralized and thus needs a
248  * dedicated service core that acts as a scheduling thread.
249  *
250  * @see rte_event_dequeue_burst()
251  */
252 #define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES     (1ULL << 3)
253 /**< Event device is capable of accepting enqueued events, of any type
254  * advertised as supported by the device, to all destination queues.
255  *
256  * When this capability is set, the "schedule_type" field of the
257  * rte_event_queue_conf structure is ignored when a queue is being configured.
258  * Instead the "sched_type" field of each event enqueued is used to
259  * select the scheduling to be performed on that event.
260  *
261  * If this capability is not set, the queue only supports events of the
262  *  *RTE_SCHED_TYPE_* type specified in the rte_event_queue_conf structure
263  *  at time of configuration.
264  *
265  * @see RTE_SCHED_TYPE_ATOMIC
266  * @see RTE_SCHED_TYPE_ORDERED
267  * @see RTE_SCHED_TYPE_PARALLEL
268  * @see rte_event_queue_conf.schedule_type
269  */
270 #define RTE_EVENT_DEV_CAP_BURST_MODE          (1ULL << 4)
271 /**< Event device is capable of operating in burst mode for enqueue (forward,
272  * release) and dequeue operations. If this capability is not set, the
273  * application still uses rte_event_dequeue_burst() and rte_event_enqueue_burst()
274  * but the PMD accepts only one event at a time.
275  *
276  * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
277  */
278 #define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE    (1ULL << 5)
279 /**< Event device ports support disabling the implicit release feature, in
280  * which the port will release all unreleased events in its dequeue operation.
281  * If this capability is set and the port is configured with implicit release
282  * disabled, the application is responsible for explicitly releasing events
283  * using either the RTE_EVENT_OP_FORWARD or the RTE_EVENT_OP_RELEASE event
284  * enqueue operations.
285  *
286  * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
287  */
288 
289 #define RTE_EVENT_DEV_CAP_NONSEQ_MODE         (1ULL << 6)
290 /**< Event device is capable of operating in non-sequential mode. The path
291  * of the event need not be sequential; the application can change
292  * the path of an event at runtime. If the flag is not set, then each event
293  * will follow a path from queue 0 to queue 1 to queue 2 etc. If the flag is
294  * set, events may be sent to queues in any order. If the flag is not set, the
295  * eventdev will return an error when the application enqueues an event for a
296  * qid which is not the next in the sequence.
297  */
298 
299 #define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK   (1ULL << 7)
300 /**< Event device is capable of configuring the queue/port link at runtime.
301  * If the flag is not set, the eventdev queue/port link can only be
302  * configured during initialization.
303  */
304 
305 #define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
306 /**< Event device is capable of setting up links between multiple queues
307  * and a single port. If the flag is not set, the eventdev can only map a
308  * single queue to each port, or map a single queue to many ports.
309  */
310 
311 #define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
312 /**< Event device preserves the flow ID from the enqueued
313  * event to the dequeued event if the flag is set. Otherwise,
314  * the content of this field is implementation dependent.
315  */
316 
317 #define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)
318 /**< Event device *does not* require calls to rte_event_maintain().
319  * An event device that does not set this flag requires calls to
320  * rte_event_maintain() during periods when neither
321  * rte_event_dequeue_burst() nor rte_event_enqueue_burst() are called
322  * on a port. This will allow the event device to perform internal
323  * processing, such as flushing buffered events, returning credits to a
324  * global pool, or processing signaling related to load balancing.
325  */
326 
327 #define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11)
328 /**< Event device is capable of changing the queue attributes at runtime, i.e.
329  * after the rte_event_queue_setup() or rte_event_dev_start() call sequence. If
330  * this flag is not set, eventdev queue attributes can only be configured during
331  * rte_event_queue_setup().
332  */
333 
334 #define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12)
335 /**< Event device is capable of supporting multiple link profiles per event port
336  * i.e., the value of `rte_event_dev_info::max_profiles_per_port` is greater
337  * than one.
338  */
339 
340 #define RTE_EVENT_DEV_CAP_ATOMIC  (1ULL << 13)
341 /**< Event device is capable of atomic scheduling.
342  * When this flag is set, the application can configure queues with scheduling type
343  * atomic on this event device.
344  * @see RTE_SCHED_TYPE_ATOMIC
345  */
346 
347 #define RTE_EVENT_DEV_CAP_ORDERED  (1ULL << 14)
348 /**< Event device is capable of ordered scheduling.
349  * When this flag is set, the application can configure queues with scheduling type
350  * ordered on this event device.
351  * @see RTE_SCHED_TYPE_ORDERED
352  */
353 
354 #define RTE_EVENT_DEV_CAP_PARALLEL  (1ULL << 15)
355 /**< Event device is capable of parallel scheduling.
356  * When this flag is set, the application can configure queues with scheduling type
357  * parallel on this event device.
358  * @see RTE_SCHED_TYPE_PARALLEL
359  */
360 
361 /* Event device priority levels */
362 #define RTE_EVENT_DEV_PRIORITY_HIGHEST   0
363 /**< Highest priority expressed across eventdev subsystem
364  * @see rte_event_queue_setup(), rte_event_enqueue_burst()
365  * @see rte_event_port_link()
366  */
367 #define RTE_EVENT_DEV_PRIORITY_NORMAL    128
368 /**< Normal priority expressed across eventdev subsystem
369  * @see rte_event_queue_setup(), rte_event_enqueue_burst()
370  * @see rte_event_port_link()
371  */
372 #define RTE_EVENT_DEV_PRIORITY_LOWEST    255
373 /**< Lowest priority expressed across eventdev subsystem
374  * @see rte_event_queue_setup(), rte_event_enqueue_burst()
375  * @see rte_event_port_link()
376  */
377 
378 /* Event queue scheduling weights */
379 #define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255
380 /**< Highest weight of an event queue
381  * @see rte_event_queue_attr_get(), rte_event_queue_attr_set()
382  */
383 #define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0
384 /**< Lowest weight of an event queue
385  * @see rte_event_queue_attr_get(), rte_event_queue_attr_set()
386  */
387 
388 /* Event queue scheduling affinity */
389 #define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255
390 /**< Highest scheduling affinity of an event queue
391  * @see rte_event_queue_attr_get(), rte_event_queue_attr_set()
392  */
393 #define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0
394 /**< Lowest scheduling affinity of an event queue
395  * @see rte_event_queue_attr_get(), rte_event_queue_attr_set()
396  */
397 
398 /**
399  * Get the total number of event devices that have been successfully
400  * initialised.
401  *
402  * @return
403  *   The total number of usable event devices.
404  */
405 uint8_t
406 rte_event_dev_count(void);
407 
408 /**
409  * Get the device identifier for the named event device.
410  *
411  * @param name
412  *   Event device name to select the event device identifier.
413  *
414  * @return
415  *   Returns event device identifier on success.
416  *   - <0: Failure to find named event device.
417  */
418 int
419 rte_event_dev_get_dev_id(const char *name);
420 
421 /**
422  * Return the NUMA socket to which a device is connected.
423  *
424  * @param dev_id
425  *   The identifier of the device.
426  * @return
427  *   The NUMA socket id to which the device is connected or
428  *   a default of zero if the socket could not be determined.
429  *   -(-EINVAL)  dev_id value is out of range.
430  */
431 int
432 rte_event_dev_socket_id(uint8_t dev_id);
433 
434 /**
435  * Event device information
436  */
437 struct rte_event_dev_info {
438 	const char *driver_name;	/**< Event driver name */
439 	struct rte_device *dev;	/**< Device information */
440 	uint32_t min_dequeue_timeout_ns;
441 	/**< Minimum supported global dequeue timeout(ns) by this device */
442 	uint32_t max_dequeue_timeout_ns;
443 	/**< Maximum supported global dequeue timeout(ns) by this device */
444 	uint32_t dequeue_timeout_ns;
445 	/**< Configured global dequeue timeout(ns) for this device */
446 	uint8_t max_event_queues;
447 	/**< Maximum event_queues supported by this device */
448 	uint32_t max_event_queue_flows;
449 	/**< Maximum supported flows in an event queue by this device*/
450 	uint8_t max_event_queue_priority_levels;
451 	/**< Maximum number of event queue priority levels by this device.
452 	 * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability
453 	 */
454 	uint8_t max_event_priority_levels;
455 	/**< Maximum number of event priority levels by this device.
456 	 * Valid when the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability
457 	 */
458 	uint8_t max_event_ports;
459 	/**< Maximum number of event ports supported by this device */
460 	uint8_t max_event_port_dequeue_depth;
461 	/**< Maximum number of events that can be dequeued at a time from an
462 	 * event port by this device.
463 	 * A device that does not support bulk dequeue will set this as 1.
464 	 */
465 	uint32_t max_event_port_enqueue_depth;
466 	/**< Maximum number of events that can be enqueued at a time from an
467 	 * event port by this device.
468 	 * A device that does not support bulk enqueue will set this as 1.
469 	 */
470 	uint8_t max_event_port_links;
471 	/**< Maximum number of queues that can be linked to a single event
472 	 * port by this device.
473 	 */
474 	int32_t max_num_events;
475 	/**< A *closed system* event dev has a limit on the number of events it
476 	 * can manage at a time. An *open system* event dev does not have a
477 	 * limit and will specify this as -1.
478 	 */
479 	uint32_t event_dev_cap;
480 	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
481 	uint8_t max_single_link_event_port_queue_pairs;
482 	/**< Maximum number of event ports and queues that are optimized for
483 	 * (and only capable of) single-link configurations supported by this
484 	 * device. These ports and queues are not accounted for in
485 	 * max_event_ports or max_event_queues.
486 	 */
487 	uint8_t max_profiles_per_port;
488 	/**< Maximum number of event queue profiles per event port.
489 	 * A device that doesn't support multiple profiles will set this as 1.
490 	 */
491 };
492 
493 /**
494  * Retrieve the contextual information of an event device.
495  *
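 * As an illustrative sketch, an application might use this to test a
 * capability bit before relying on it (error handling omitted):
 * \code{.c}
 *	struct rte_event_dev_info info;
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) {
 *		// device supports bursts of more than one event
 *	}
 * \endcode
 *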
496  * @param dev_id
497  *   The identifier of the device.
498  *
499  * @param[out] dev_info
500  *   A pointer to a structure of type *rte_event_dev_info* to be filled with the
501  *   contextual information of the device.
502  *
503  * @return
504  *   - 0: Success, driver updates the contextual information of the event device
505  *   - <0: Error code returned by the driver info get function.
506  */
507 int
508 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
509 
510 /**
511  * The count of ports.
512  */
513 #define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
514 /**
515  * The count of queues.
516  */
517 #define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
518 /**
519  * The status of the device, zero for stopped, non-zero for started.
520  */
521 #define RTE_EVENT_DEV_ATTR_STARTED 2
522 
523 /**
524  * Get an attribute from a device.
525  *
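 * For example, the number of configured event ports can be read back as in
 * this illustrative sketch:
 * \code{.c}
 *	uint32_t nb_ports = 0;
 *
 *	rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_PORT_COUNT, &nb_ports);
 * \endcode
 *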
526  * @param dev_id Eventdev id
527  * @param attr_id The attribute ID to retrieve
528  * @param[out] attr_value A pointer that will be filled in with the attribute
529  *             value if successful.
530  *
531  * @return
532  *   - 0: Successfully retrieved attribute value
533  *   - -EINVAL: Invalid device or  *attr_id* provided, or *attr_value* is NULL
534  */
535 int
536 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
537 		       uint32_t *attr_value);
538 
539 
540 /* Event device configuration bitmap flags */
541 #define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
542 /**< Override the global *dequeue_timeout_ns* and use per dequeue timeout in ns.
543  *  @see rte_event_dequeue_timeout_ticks(), rte_event_dequeue_burst()
544  */
545 
546 /** Event device configuration structure */
547 struct rte_event_dev_config {
548 	uint32_t dequeue_timeout_ns;
549 	/**< rte_event_dequeue_burst() timeout on this device.
550 	 * This value should be in the range of *min_dequeue_timeout_ns* and
551 	 * *max_dequeue_timeout_ns* which were previously provided by
552 	 * rte_event_dev_info_get().
553 	 * The value 0 is allowed, in which case the default dequeue timeout is used.
554 	 * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
555 	 */
556 	int32_t nb_events_limit;
557 	/**< In a *closed system* this field is the limit on maximum number of
558 	 * events that can be inflight in the eventdev at a given time. The
559 	 * limit is required to ensure that the finite space in a closed system
560 	 * is not overwhelmed. The value cannot exceed the *max_num_events*
561 	 * as provided by rte_event_dev_info_get().
562 	 * This value should be set to -1 for *open system*.
563 	 */
564 	uint8_t nb_event_queues;
565 	/**< Number of event queues to configure on this device.
566 	 * This value cannot exceed the *max_event_queues* which was previously
567 	 * provided by rte_event_dev_info_get().
568 	 */
569 	uint8_t nb_event_ports;
570 	/**< Number of event ports to configure on this device.
571 	 * This value cannot exceed the *max_event_ports* which was previously
572 	 * provided by rte_event_dev_info_get().
573 	 */
574 	uint32_t nb_event_queue_flows;
575 	/**< Number of flows for any event queue on this device.
576 	 * This value cannot exceed the *max_event_queue_flows* which was previously
577 	 * provided by rte_event_dev_info_get().
578 	 */
579 	uint32_t nb_event_port_dequeue_depth;
580 	/**< Maximum number of events that can be dequeued at a time from an
581 	 * event port by this device.
582 	 * This value cannot exceed the *max_event_port_dequeue_depth*
583 	 * which was previously provided by rte_event_dev_info_get().
584 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
585 	 * @see rte_event_port_setup()
586 	 */
587 	uint32_t nb_event_port_enqueue_depth;
588 	/**< Maximum number of events that can be enqueued at a time from an
589 	 * event port by this device.
590 	 * This value cannot exceed the *max_event_port_enqueue_depth*
591 	 * which was previously provided by rte_event_dev_info_get().
592 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
593 	 * @see rte_event_port_setup()
594 	 */
595 	uint32_t event_dev_cfg;
596 	/**< Event device config flags(RTE_EVENT_DEV_CFG_)*/
597 	uint8_t nb_single_link_event_port_queues;
598 	/**< Number of event ports and queues that will be singly-linked to
599 	 * each other. These are a subset of the overall event ports and
600 	 * queues; this value cannot exceed *nb_event_ports* or
601 	 * *nb_event_queues*. If the device has ports and queues that are
602 	 * optimized for single-link usage, this field is a hint for how many
603 	 * to allocate; otherwise, regular event ports and queues can be used.
604 	 */
605 };
606 
607 /**
608  * Configure an event device.
609  *
610  * This function must be invoked first before any other function in the
611  * API. This function can also be re-invoked when a device is in the
612  * stopped state.
613  *
614  * The caller may use rte_event_dev_info_get() to get the capabilities of the
615  * resources available for this event device.
616  *
617  * @param dev_id
618  *   The identifier of the device to configure.
619  * @param dev_conf
620  *   The event device configuration structure.
621  *
622  * @return
623  *   - 0: Success, device configured.
624  *   - <0: Error code returned by the driver configuration function.
625  */
626 int
627 rte_event_dev_configure(uint8_t dev_id,
628 			const struct rte_event_dev_config *dev_conf);
629 
630 /* Event queue specific APIs */
631 
632 /* Event queue configuration bitmap flags */
633 #define RTE_EVENT_QUEUE_CFG_ALL_TYPES          (1ULL << 0)
634 /**< Allow ATOMIC,ORDERED,PARALLEL schedule type enqueue
635  *
636  * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
637  * @see rte_event_enqueue_burst()
638  */
639 #define RTE_EVENT_QUEUE_CFG_SINGLE_LINK        (1ULL << 1)
640 /**< This event queue links only to a single event port.
641  *
642  *  @see rte_event_port_setup(), rte_event_port_link()
643  */
644 
645 /** Event queue configuration structure */
646 struct rte_event_queue_conf {
647 	uint32_t nb_atomic_flows;
648 	/**< The maximum number of active flows this queue can track at any
649 	 * given time. If the queue is configured for atomic scheduling (by
650 	 * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg
651 	 * or RTE_SCHED_TYPE_ATOMIC flag to schedule_type), then the
652 	 * value must be in the range of [1, nb_event_queue_flows], which was
653 	 * previously provided in rte_event_dev_configure().
654 	 */
655 	uint32_t nb_atomic_order_sequences;
656 	/**< The maximum number of outstanding events waiting to be
657 	 * reordered by this queue. In other words, the number of entries in
658 	 * this queue's reorder buffer. When the number of events in the
659 	 * reorder buffer reaches *nb_atomic_order_sequences*, then the
660 	 * scheduler cannot schedule the events from this queue and an invalid
661 	 * event will be returned from dequeue until one or more entries are
662 	 * freed up/released.
663 	 * If the queue is configured for ordered scheduling (by applying the
664 	 * RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg or
665 	 * RTE_SCHED_TYPE_ORDERED flag to schedule_type), then the value must
666 	 * be in the range of [1, nb_event_queue_flows], which was
667 	 * previously supplied to rte_event_dev_configure().
668 	 */
669 	uint32_t event_queue_cfg;
670 	/**< Queue cfg flags(EVENT_QUEUE_CFG_) */
671 	uint8_t schedule_type;
672 	/**< Queue schedule type(RTE_SCHED_TYPE_*).
673 	 * Valid when RTE_EVENT_QUEUE_CFG_ALL_TYPES bit is not set in
674 	 * event_queue_cfg.
675 	 */
676 	uint8_t priority;
677 	/**< Priority for this event queue relative to other event queues.
678 	 * The requested priority should be in the range of
679 	 * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
680 	 * The implementation shall normalize the requested priority to
681 	 * event device supported priority value.
682 	 * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability
683 	 */
684 	uint8_t weight;
685 	/**< Weight of the event queue relative to other event queues.
686 	 * The requested weight should be in the range of
687 	 * [RTE_EVENT_QUEUE_WEIGHT_HIGHEST, RTE_EVENT_QUEUE_WEIGHT_LOWEST].
688 	 * The implementation shall normalize the requested weight to event
689 	 * device supported weight value.
690 	 * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
691 	 */
692 	uint8_t affinity;
693 	/**< Affinity of the event queue relative to other event queues.
694 	 * The requested affinity should be in the range of
695 	 * [RTE_EVENT_QUEUE_AFFINITY_HIGHEST, RTE_EVENT_QUEUE_AFFINITY_LOWEST].
696 	 * The implementation shall normalize the requested affinity to event
697 	 * device supported affinity value.
698 	 * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
699 	 */
700 };
701 
702 /**
703  * Retrieve the default configuration information of an event queue designated
704  * by its *queue_id* from the event driver for an event device.
705  *
706  * This function is intended to be used in conjunction with rte_event_queue_setup()
707  * where the caller needs to set up the queue by overriding a few default values.
708  *
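 * A hypothetical sketch of such an override, forcing atomic scheduling on
 * queue 0 while keeping the remaining defaults:
 * \code{.c}
 *	struct rte_event_queue_conf conf;
 *
 *	rte_event_queue_default_conf_get(dev_id, 0, &conf);
 *	conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	rte_event_queue_setup(dev_id, 0, &conf);
 * \endcode
 *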
709  * @param dev_id
710  *   The identifier of the device.
711  * @param queue_id
712  *   The index of the event queue to get the configuration information.
713  *   The value must be in the range [0, nb_event_queues - 1]
714  *   previously supplied to rte_event_dev_configure().
715  * @param[out] queue_conf
716  *   The pointer to the default event queue configuration data.
717  * @return
718  *   - 0: Success, driver updates the default event queue configuration data.
719  *   - <0: Error code returned by the driver info get function.
720  *
721  * @see rte_event_queue_setup()
722  */
723 int
724 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
725 				 struct rte_event_queue_conf *queue_conf);
726 
727 /**
728  * Allocate and set up an event queue for an event device.
729  *
730  * @param dev_id
731  *   The identifier of the device.
732  * @param queue_id
733  *   The index of the event queue to setup. The value must be in the range
734  *   [0, nb_event_queues - 1] previously supplied to rte_event_dev_configure().
735  * @param queue_conf
736  *   The pointer to the configuration data to be used for the event queue.
737  *   NULL value is allowed, in which case the default configuration is used.
738  *
739  * @see rte_event_queue_default_conf_get()
740  *
741  * @return
742  *   - 0: Success, event queue correctly set up.
743  *   - <0: event queue configuration failed
744  */
745 int
746 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
747 		      const struct rte_event_queue_conf *queue_conf);
748 
749 /**
750  * The priority of the queue.
751  */
752 #define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
753 /**
754  * The number of atomic flows configured for the queue.
755  */
756 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
757 /**
758  * The number of atomic order sequences configured for the queue.
759  */
760 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
761 /**
762  * The cfg flags for the queue.
763  */
764 #define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
765 /**
766  * The schedule type of the queue.
767  */
768 #define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
769 /**
770  * The weight of the queue.
771  */
772 #define RTE_EVENT_QUEUE_ATTR_WEIGHT 5
773 /**
774  * Affinity of the queue.
775  */
776 #define RTE_EVENT_QUEUE_ATTR_AFFINITY 6
777 
778 /**
779  * Get an attribute from a queue.
780  *
781  * @param dev_id
782  *   Eventdev id
783  * @param queue_id
784  *   Eventdev queue id
785  * @param attr_id
786  *   The attribute ID to retrieve
787  * @param[out] attr_value
788  *   A pointer that will be filled in with the attribute value if successful
789  *
790  * @return
791  *   - 0: Successfully returned value
792  *   - -EINVAL: invalid device, queue or attr_id provided, or attr_value was
793  *		NULL
794  *   - -EOVERFLOW: returned when attr_id is set to
795  *   RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and event_queue_cfg is set to
796  *   RTE_EVENT_QUEUE_CFG_ALL_TYPES
797  */
798 int
799 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
800 			uint32_t *attr_value);
801 
802 /**
803  * Set an event queue attribute.
804  *
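 * For example, on a device with the RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR
 * capability, the weight of a queue could be raised at runtime as in this
 * illustrative sketch:
 * \code{.c}
 *	rte_event_queue_attr_set(dev_id, queue_id, RTE_EVENT_QUEUE_ATTR_WEIGHT,
 *				 RTE_EVENT_QUEUE_WEIGHT_HIGHEST);
 * \endcode
 *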
805  * @param dev_id
806  *   Eventdev id
807  * @param queue_id
808  *   Eventdev queue id
809  * @param attr_id
810  *   The attribute ID to set
811  * @param attr_value
812  *   The attribute value to set
813  *
814  * @return
815  *   - 0: Successfully set attribute.
816  *   - -EINVAL: invalid device, queue or attr_id.
817  *   - -ENOTSUP: device does not support setting the event attribute.
818  *   - <0: failed to set event queue attribute
819  */
820 int
821 rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
822 			 uint64_t attr_value);
823 
824 /* Event port specific APIs */
825 
826 /* Event port configuration bitmap flags */
827 #define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)
828 /**< Configure the port not to release outstanding events in
829  * rte_event_dequeue_burst(). If set, all events received through
830  * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
831  * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
832  * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
833  */
834 #define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)
835 /**< This event port links only to a single event queue.
836  *
837  *  @see rte_event_port_setup(), rte_event_port_link()
838  */
839 #define RTE_EVENT_PORT_CFG_HINT_PRODUCER       (1ULL << 2)
840 /**< Hint that this event port will primarily enqueue events to the system.
841  * A PMD can optimize its internal workings by assuming that this port is
842  * primarily going to enqueue NEW events.
843  *
844  * Note that this flag is only a hint, so PMDs must operate under the
845  * assumption that any port can enqueue an event with any type of op.
846  *
847  *  @see rte_event_port_setup()
848  */
849 #define RTE_EVENT_PORT_CFG_HINT_CONSUMER       (1ULL << 3)
850 /**< Hint that this event port will primarily dequeue events from the system.
851  * A PMD can optimize its internal workings by assuming that this port is
852  * primarily going to consume events, and not enqueue FORWARD or RELEASE
853  * events.
854  *
855  * Note that this flag is only a hint, so PMDs must operate under the
856  * assumption that any port can enqueue an event with any type of op.
857  *
858  *  @see rte_event_port_setup()
859  */
860 #define RTE_EVENT_PORT_CFG_HINT_WORKER         (1ULL << 4)
861 /**< Hint that this event port will primarily pass existing events through.
862  * A PMD can optimize its internal workings by assuming that this port is
863  * primarily going to FORWARD events, and not enqueue NEW or RELEASE events
864  * often.
865  *
866  * Note that this flag is only a hint, so PMDs must operate under the
867  * assumption that any port can enqueue an event with any type of op.
868  *
869  *  @see rte_event_port_setup()
870  */
871 
872 /** Event port configuration structure */
873 struct rte_event_port_conf {
874 	int32_t new_event_threshold;
875 	/**< A backpressure threshold for new event enqueues on this port.
876 	 * Use for *closed system* event dev where event capacity is limited,
877 	 * and cannot exceed the capacity of the event dev.
878 	 * Configuring ports with different thresholds can make higher priority
879 	 * traffic less likely to be backpressured.
880 	 * For example, a port used to inject NIC Rx packets into the event dev
881 	 * can have a lower threshold so as not to overwhelm the device,
882 	 * while ports used for worker pools can have a higher threshold.
883 	 * This value cannot exceed the *nb_events_limit*
884 	 * which was previously supplied to rte_event_dev_configure().
885 	 * This should be set to '-1' for *open system*.
886 	 */
887 	uint16_t dequeue_depth;
888 	/**< Configure number of bulk dequeues for this event port.
889 	 * This value cannot exceed the *nb_event_port_dequeue_depth*
890 	 * which was previously supplied to rte_event_dev_configure().
891 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
892 	 */
893 	uint16_t enqueue_depth;
894 	/**< Configure number of bulk enqueues for this event port.
895 	 * This value cannot exceed the *nb_event_port_enqueue_depth*
896 	 * which was previously supplied to rte_event_dev_configure().
897 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
898 	 */
899 	uint32_t event_port_cfg; /**< Port cfg flags(EVENT_PORT_CFG_) */
900 };
901 
902 /**
903  * Retrieve the default configuration information of an event port designated
904  * by its *port_id* from the event driver for an event device.
905  *
906  * This function is intended to be used in conjunction with rte_event_port_setup()
907  * where the caller needs to set up the port by overriding a few default values.
908  *
909  * @param dev_id
910  *   The identifier of the device.
911  * @param port_id
912  *   The index of the event port to get the configuration information.
913  *   The value must be in the range [0, nb_event_ports - 1]
914  *   previously supplied to rte_event_dev_configure().
915  * @param[out] port_conf
916  *   The pointer to the default event port configuration data
917  * @return
918  *   - 0: Success, driver updates the default event port configuration data.
919  *   - <0: Error code returned by the driver info get function.
920  *
921  * @see rte_event_port_setup()
922  */
923 int
924 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
925 				struct rte_event_port_conf *port_conf);
926 
927 /**
928  * Allocate and set up an event port for an event device.
929  *
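 * A hypothetical sketch overriding the default configuration for port 0,
 * hinting that it will mainly be used by a producer core:
 * \code{.c}
 *	struct rte_event_port_conf conf;
 *
 *	rte_event_port_default_conf_get(dev_id, 0, &conf);
 *	conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_PRODUCER;
 *	rte_event_port_setup(dev_id, 0, &conf);
 * \endcode
 *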
930  * @param dev_id
931  *   The identifier of the device.
932  * @param port_id
933  *   The index of the event port to setup. The value must be in the range
934  *   [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
935  * @param port_conf
936  *   The pointer to the configuration data to be used for the event port.
937  *   NULL value is allowed, in which case the default configuration is used.
938  *
939  * @see rte_event_port_default_conf_get()
940  *
941  * @return
942  *   - 0: Success, event port correctly set up.
943  *   - <0: Port configuration failed
944  *   - (-EDQUOT) Quota exceeded (the application tried to link a queue configured
945  *   with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
946  */
947 int
948 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
949 		     const struct rte_event_port_conf *port_conf);
950 
951 typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
952 					  struct rte_event event, void *arg);
953 /**< Callback function prototype that can be passed during
954  * rte_event_port_quiesce(), invoked once per released event.
955  */
956 
957 /**
958  * Quiesce any core specific resources consumed by the event port.
959  *
960  * Event ports are generally coupled with lcores, and a given hardware
961  * implementation might require the PMD to store port specific data in the
962  * lcore.
963  * When the application decides to migrate the event port to another lcore
964  * or tear down the current lcore, it may call `rte_event_port_quiesce`
965  * to make sure that all the data associated with the event port is released
966  * from the lcore; this might also include any prefetched events.
967  * While releasing the event port from the lcore, this function calls the
968  * user-provided flush callback once per event.
969  *
970  * @note Invocation of this API does not affect the existing port configuration.
971  *
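 * A minimal sketch, assuming the flushed events carry mbufs that the
 * application wants to free (the callback logic is application specific):
 * \code{.c}
 *	static void
 *	port_flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
 *	{
 *		rte_pktmbuf_free(ev.mbuf);
 *	}
 *
 *	rte_event_port_quiesce(dev_id, port_id, port_flush_cb, NULL);
 * \endcode
 *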
972  * @param dev_id
973  *   The identifier of the device.
974  * @param port_id
975  *   The index of the event port to quiesce. The value must be in the range
976  *   [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
977  * @param release_cb
978  *   Callback function invoked once per flushed event.
979  * @param args
980  *   Argument supplied to callback.
981  */
982 void
983 rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
984 		       rte_eventdev_port_flush_t release_cb, void *args);
985 
986 /**
987  * The queue depth of the port on the enqueue side
988  */
989 #define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
990 /**
991  * The queue depth of the port on the dequeue side
992  */
993 #define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
994 /**
995  * The new event threshold of the port
996  */
997 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
998 /**
999  * The implicit release disable attribute of the port
1000  */
1001 #define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
1002 
1003 /**
1004  * Get an attribute from a port.
1005  *
1006  * @param dev_id
1007  *   Eventdev id
1008  * @param port_id
1009  *   Eventdev port id
1010  * @param attr_id
1011  *   The attribute ID to retrieve
1012  * @param[out] attr_value
1013  *   A pointer that will be filled in with the attribute value if successful
1014  *
1015  * @return
1016  *   - 0: Successfully returned value
1017  *   - (-EINVAL) Invalid device, port or attr_id, or attr_value was NULL
1018  */
1019 int
1020 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
1021 			uint32_t *attr_value);
1022 
1023 /**
1024  * Start an event device.
1025  *
1026  * The device start step is the last one and consists of setting the event
1027  * queues to start accepting the events and schedules to event ports.
1028  *
1029  * On success, all basic functions exported by the API (event enqueue,
1030  * event dequeue and so on) can be invoked.
1031  *
1032  * @param dev_id
1033  *   Event device identifier
1034  * @return
1035  *   - 0: Success, device started.
1036  *   - -ESTALE : Not all ports of the device are configured
1037  *   - -ENOLINK: Not all queues are linked, which could lead to deadlock.
1038  */
1039 int
1040 rte_event_dev_start(uint8_t dev_id);
1041 
1042 /**
1043  * Stop an event device.
1044  *
1045  * This function causes all queued events to be drained, including those
1046  * residing in event ports. While draining events out of the device, this
1047  * function calls the user-provided flush callback (if one was registered) once
1048  * per event.
1049  *
1050  * The device can be restarted with a call to rte_event_dev_start(). Threads
1051  * that continue to enqueue/dequeue while the device is stopped, or being
1052  * stopped, will result in undefined behavior. This includes event adapters,
1053  * which must be stopped prior to stopping the eventdev.
1054  *
1055  * @param dev_id
1056  *   Event device identifier.
1057  *
1058  * @see rte_event_dev_stop_flush_callback_register()
1059  */
1060 void
1061 rte_event_dev_stop(uint8_t dev_id);
1062 
1063 typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
1064 					  struct rte_event event, void *arg);
1065 /**< Callback function called during rte_event_dev_stop(), invoked once per
1066  * flushed event.
1067  */
1068 
1069 /**
1070  * Registers a callback function to be invoked during rte_event_dev_stop() for
1071  * each flushed event. This function can be used to properly dispose of queued
1072  * events, for example events containing memory pointers.
1073  *
1074  * The callback function is only registered for the calling process. The
1075  * callback function must be registered in every process that can call
1076  * rte_event_dev_stop().
1077  *
1078  * To unregister a callback, call this function with a NULL callback pointer.
1079  *
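 * A minimal sketch, assuming the queued events reference mbufs that must be
 * freed when the device is stopped (the callback logic is application
 * specific):
 * \code{.c}
 *	static void
 *	stop_flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
 *	{
 *		rte_pktmbuf_free(ev.mbuf);
 *	}
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, stop_flush_cb, NULL);
 * \endcode
 *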
1080  * @param dev_id
1081  *   The identifier of the device.
1082  * @param callback
1083  *   Callback function invoked once per flushed event.
1084  * @param userdata
1085  *   Argument supplied to callback.
1086  *
1087  * @return
1088  *  - 0 on success.
1089  *  - -EINVAL if *dev_id* is invalid
1090  *
1091  * @see rte_event_dev_stop()
1092  */
1093 int rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1094 					       rte_eventdev_stop_flush_t callback, void *userdata);
1095 
1096 /**
1097  * Close an event device. The device cannot be restarted!
1098  *
1099  * @param dev_id
1100  *   Event device identifier
1101  *
1102  * @return
1103  *  - 0 on successfully closing device
1104  *  - <0 on failure to close device
1105  *  - (-EAGAIN) if device is busy
1106  */
1107 int
1108 rte_event_dev_close(uint8_t dev_id);
1109 
1110 /**
1111  * Event vector structure.
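 *
 * As an illustrative sketch, a worker that has already classified a dequeued
 * event ev as an mbuf vector (via its event_type) might walk the valid
 * elements like this, where process_mbuf() is a hypothetical application helper:
 * \code{.c}
 *	struct rte_event_vector *vec = ev.vec;
 *	uint16_t i;
 *
 *	for (i = 0; i < vec->nb_elem; i++)
 *		process_mbuf(vec->mbufs[vec->elem_offset + i]);
 * \endcode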
1112  */
1113 struct rte_event_vector {
1114 	uint16_t nb_elem;
1115 	/**< Number of elements valid in this event vector. */
1116 	uint16_t elem_offset : 12;
1117 	/**< Offset into the vector array where valid elements start from. */
1118 	uint16_t rsvd : 3;
1119 	/**< Reserved for future use */
1120 	uint16_t attr_valid : 1;
1121 	/**< Indicates that the below union attributes have valid information.
1122 	 */
1123 	union {
1124 		/* Used by Rx/Tx adapter.
1125 		 * Indicates that all the elements in this vector belong to the
1126 		 * same port and queue pair when originating from Rx adapter,
1127 		 * valid only when event type is ETHDEV_VECTOR or
1128 		 * ETH_RX_ADAPTER_VECTOR.
1129 		 * Can also be used to indicate to the Tx adapter the destination
1130 		 * port and queue of the mbufs in the vector
1131 		 */
1132 		struct {
1133 			uint16_t port;
1134 			/* Ethernet device port id. */
1135 			uint16_t queue;
1136 			/* Ethernet device queue id. */
1137 		};
1138 	};
1139 	/**< Union to hold common attributes of the vector array. */
1140 	uint64_t impl_opaque;
1141 
1142 /* empty structures do not have zero size in C++ leading to compilation errors
1143  * with clang about structure having different sizes in C and C++.
1144  * Since these are all zero-sized arrays, we can omit the "union" wrapper for
1145  * C++ builds, removing the warning.
1146  */
1147 #ifndef __cplusplus
1148 	/**< Implementation specific opaque value.
1149 	 * An implementation may use this field to hold implementation specific
1150 	 * value to share between dequeue and enqueue operation.
1151 	 * The application should not modify this field.
1152 	 */
1153 	union {
1154 #endif
1155 		struct rte_mbuf *mbufs[0];
1156 		void *ptrs[0];
1157 		uint64_t u64s[0];
1158 #ifndef __cplusplus
1159 	} __rte_aligned(16);
1160 #endif
1161 	/**< Start of the vector array union. Depending upon the event type the
1162 	 * vector array can be an array of mbufs or pointers or opaque u64
1163 	 * values.
1164 	 */
1165 } __rte_aligned(16);
1166 
1167 /* Scheduler type definitions */
1168 #define RTE_SCHED_TYPE_ORDERED          0
1169 /**< Ordered scheduling
1170  *
1171  * Events from an ordered flow of an event queue can be scheduled to multiple
1172  * ports for concurrent processing while maintaining the original event order.
1173  * This scheme enables the user to achieve high single flow throughput by
1174  * avoiding SW synchronization for ordering between ports which are bound to cores.
1175  *
1176  * The source flow ordering from an event queue is maintained when events are
1177  * enqueued to their destination queue within the same ordered flow context.
1178  * An event port holds the context until the application calls
1179  * rte_event_dequeue_burst() from the same port, which implicitly releases
1180  * the context.
1181  * The user may allow the scheduler to release the context earlier than that
1182  * by invoking rte_event_enqueue_burst() with the RTE_EVENT_OP_RELEASE operation.
1183  *
1184  * Events from the source queue appear in their original order when dequeued
1185  * from a destination queue.
1186  * Event ordering is based on the received event(s), but also other
1187  * (newly allocated or stored) events are ordered when enqueued within the same
1188  * ordered context. Events not enqueued (e.g. released or stored) within the
1189  * context are  considered missing from reordering and are skipped at this time
1190  * (but can be ordered again within another context).
1191  *
1192  * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
1193  */
1194 
1195 #define RTE_SCHED_TYPE_ATOMIC           1
1196 /**< Atomic scheduling
1197  *
1198  * Events from an atomic flow of an event queue can be scheduled only to a
1199  * single port at a time. The port is guaranteed to have exclusive (atomic)
1200  * access to the associated flow context, which enables the user to avoid SW
1201  * synchronization. Atomic flows also help to maintain event ordering
1202  * since only one port at a time can process events from a flow of an
1203  * event queue.
1204  *
1205  * The atomic queue synchronization context is dedicated to the port until
1206  * the application calls rte_event_dequeue_burst() from the same port,
1207  * which implicitly releases the context. The user may allow the scheduler to
1208  * release the context earlier than that by invoking rte_event_enqueue_burst()
1209  * with RTE_EVENT_OP_RELEASE operation.
1210  *
1211  * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
1212  */
1213 
1214 #define RTE_SCHED_TYPE_PARALLEL         2
1215 /**< Parallel scheduling
1216  *
1217  * The scheduler performs priority scheduling, load balancing, etc. functions
1218  * but does not provide additional event synchronization or ordering.
1219  * It is free to schedule events from a single parallel flow of an event queue
1220  * to multiple event ports for concurrent processing.
1221  * The application is responsible for flow context synchronization and
1222  * event ordering (SW synchronization).
1223  *
1224  * @see rte_event_queue_setup(), rte_event_dequeue_burst()
1225  */
1226 
1227 /* Event types to classify the event source */
1228 #define RTE_EVENT_TYPE_ETHDEV           0x0
1229 /**< The event generated from ethdev subsystem */
1230 #define RTE_EVENT_TYPE_CRYPTODEV        0x1
1231 /**< The event generated from cryptodev subsystem */
1232 #define RTE_EVENT_TYPE_TIMER		0x2
1233 /**< The event generated from event timer adapter */
1234 #define RTE_EVENT_TYPE_CPU              0x3
1235 /**< The event generated from cpu for pipelining.
1236  * Application may use *sub_event_type* to further classify the event
1237  */
1238 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER   0x4
1239 /**< The event generated from event eth Rx adapter */
1240 #define RTE_EVENT_TYPE_DMADEV           0x5
1241 /**< The event generated from dma subsystem */
1242 #define RTE_EVENT_TYPE_VECTOR           0x8
1243 /**< Indicates that event is a vector.
1244  * All vector event types are a logical OR of RTE_EVENT_TYPE_VECTOR and a base event type.
1245  * This simplifies the pipeline design as one can split processing the events
1246  * between vector events and normal event across event types.
1247  * Example:
1248  *	if (ev.event_type & RTE_EVENT_TYPE_VECTOR) {
1249  *		// Classify and handle vector event.
1250  *	} else {
1251  *		// Classify and handle event.
1252  *	}
1253  */
1254 #define RTE_EVENT_TYPE_ETHDEV_VECTOR                                           \
1255 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
1256 /**< The event vector generated from ethdev subsystem */
1257 #define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
1258 /**< The event vector generated from cpu for pipelining. */
1259 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR                                   \
1260 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
1261 /**< The event vector generated from eth Rx adapter. */
1262 #define RTE_EVENT_TYPE_CRYPTODEV_VECTOR                                        \
1263 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
1264 /**< The event vector generated from cryptodev adapter. */
1265 
1266 #define RTE_EVENT_TYPE_MAX              0x10
1267 /**< Maximum number of event types */
1268 
1269 /* Event enqueue operations */
1270 #define RTE_EVENT_OP_NEW                0
1271 /**< The event producers use this operation to inject a new event to the
1272  * event device.
1273  */
1274 #define RTE_EVENT_OP_FORWARD            1
1275 /**< The CPU uses this operation to forward the event to a different event queue,
1276  * or to change to a new application specific flow or schedule type, to enable
1277  * pipelining.
1278  *
1279  * This operation must only be enqueued to the same port that the
1280  * event to be forwarded was dequeued from.
1281  */
1282 #define RTE_EVENT_OP_RELEASE            2
1283 /**< Release the flow context associated with the schedule type.
1284  *
1285  * If the current flow's scheduling type is *RTE_SCHED_TYPE_ATOMIC*,
1286  * then this function hints the scheduler that the user has completed critical
1287  * section processing in the current atomic context.
1288  * The scheduler is now allowed to schedule events from the same flow from
1289  * an event queue to another port. However, the context may be still held
1290  * until the next rte_event_dequeue_burst() call; this call allows but does not
1291  * force the scheduler to release the context early.
1292  *
1293  * Early atomic context release may increase parallelism and thus system
1294  * performance, but the user needs to design carefully the split into critical
1295  * vs non-critical sections.
1296  *
1297  * If the current flow's schedule type is *RTE_SCHED_TYPE_ORDERED*,
1298  * then this function hints the scheduler that the user has done everything needed
1299  * to maintain event order in the current ordered context.
1300  * The scheduler is allowed to release the ordered context of this port and
1301  * avoid reordering any following enqueues.
1302  *
1303  * Early ordered context release may increase parallelism and thus system
1304  * performance.
1305  *
1306  * If the current flow's schedule type is *RTE_SCHED_TYPE_PARALLEL*,
1307  * or no scheduling context is held, then this function may be a no-op,
1308  * depending on the implementation.
1309  *
1310  * This operation must only be enqueued to the same port that the
1311  * event to be released was dequeued from.
1312  */
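
/*
 * A minimal sketch of how RTE_EVENT_OP_RELEASE is typically used, assuming a
 * worker that has just dequeued one event from an atomic queue: the critical
 * section is completed first, the context is released, and the remaining
 * non-critical work then proceeds in parallel with other ports.
 *
 *	struct rte_event ev;
 *
 *	if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, timeout_ticks) == 1) {
 *		critical_section(&ev);		// hypothetical application helper
 *		ev.op = RTE_EVENT_OP_RELEASE;	// release the atomic context early
 *		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 *		non_critical_section(&ev);	// hypothetical application helper
 *	}
 */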
1313 
1314 /**
1315  * The generic *rte_event* structure to hold the event attributes
1316  * for dequeue and enqueue operation
1317  */
1318 struct rte_event {
1319 	/** WORD0 */
1320 	union {
1321 		uint64_t event;
1322 		/** Event attributes for dequeue or enqueue operation */
1323 		struct {
1324 			uint32_t flow_id:20;
1325 			/**< Targeted flow identifier for the enqueue and
1326 			 * dequeue operation.
1327 			 * The value must be in the range of
1328 			 * [0, nb_event_queue_flows - 1] which was
1329 			 * previously supplied to rte_event_dev_configure().
1330 			 */
1331 			uint32_t sub_event_type:8;
1332 			/**< Sub-event types based on the event source.
1333 			 * @see RTE_EVENT_TYPE_CPU
1334 			 */
1335 			uint32_t event_type:4;
1336 			/**< Event type to classify the event source.
1337 			 * @see RTE_EVENT_TYPE_ETHDEV, (RTE_EVENT_TYPE_*)
1338 			 */
1339 			uint8_t op:2;
1340 			/**< The type of event enqueue operation - new/forward/
1341 			 * etc. This field is not preserved across an instance
1342 			 * and is undefined on dequeue.
1343 			 * @see RTE_EVENT_OP_NEW, (RTE_EVENT_OP_*)
1344 			 */
1345 			uint8_t rsvd:4;
1346 			/**< Reserved for future use */
1347 			uint8_t sched_type:2;
1348 			/**< Scheduler synchronization type (RTE_SCHED_TYPE_*)
1349 			 * associated with flow id on a given event queue
1350 			 * for the enqueue and dequeue operation.
1351 			 */
1352 			uint8_t queue_id;
1353 			/**< Targeted event queue identifier for the enqueue or
1354 			 * dequeue operation.
1355 			 * The value must be in the range of
1356 			 * [0, nb_event_queues - 1] which was previously supplied to
1357 			 * rte_event_dev_configure().
1358 			 */
1359 			uint8_t priority;
1360 			/**< Event priority relative to other events in the
1361 			 * event queue. The requested priority should be in the
1362 			 * range of [RTE_EVENT_DEV_PRIORITY_HIGHEST,
1363 			 * RTE_EVENT_DEV_PRIORITY_LOWEST].
1364 			 * The implementation shall normalize the requested
1365 			 * priority to a supported priority value.
1366 			 * Valid when the device has
1367 			 * RTE_EVENT_DEV_CAP_EVENT_QOS capability.
1368 			 */
1369 			uint8_t impl_opaque;
1370 			/**< Implementation specific opaque value.
1371 			 * An implementation may use this field to hold
1372 			 * implementation specific value to share between
1373 			 * dequeue and enqueue operation.
1374 			 * The application should not modify this field.
1375 			 */
1376 		};
1377 	};
1378 	/** WORD1 */
1379 	union {
1380 		uint64_t u64;
1381 		/**< Opaque 64-bit value */
1382 		void *event_ptr;
1383 		/**< Opaque event pointer */
1384 		struct rte_mbuf *mbuf;
1385 		/**< mbuf pointer if dequeued event is associated with mbuf */
1386 		struct rte_event_vector *vec;
1387 		/**< Event vector pointer. */
1388 	};
1389 };
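
/*
 * A minimal sketch of populating a *rte_event* for injection as a new event;
 * the queue identifier, flow identifier and mbuf payload used here are
 * illustrative values chosen by the application.
 *
 *	struct rte_event ev = {
 *		.op = RTE_EVENT_OP_NEW,
 *		.queue_id = 0,				// assumed target queue
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.event_type = RTE_EVENT_TYPE_CPU,
 *		.sub_event_type = 0,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		.flow_id = flow,			// application-defined flow
 *		.mbuf = m,				// payload carried in WORD1
 *	};
 *
 *	rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 */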
1390 
1391 /* Ethdev Rx adapter capability bitmap flags */
1392 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT	0x1
1393 /**< This flag is set when the packet transfer mechanism is in HW.
1394  * Ethdev can send packets to the event device using internal event port.
1395  */
1396 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ	0x2
1397 /**< Adapter supports multiple event queues per ethdev. Every ethdev
1398  * Rx queue can be connected to a unique event queue.
1399  */
1400 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
1401 /**< The application can override the adapter generated flow ID in the
1402  * event. This flow ID can be specified when adding an ethdev Rx queue
1403  * to the adapter using the ev.flow_id member.
1404  * @see struct rte_event_eth_rx_adapter_queue_conf::ev
1405  * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
1406  */
1407 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR	0x8
1408 /**< Adapter supports event vectorization per ethdev. */
1409 
1410 /**
1411  * Retrieve the event device's ethdev Rx adapter capabilities for the
1412  * specified ethernet port
1413  *
1414  * @param dev_id
1415  *   The identifier of the device.
1416  *
1417  * @param eth_port_id
1418  *   The identifier of the ethernet device.
1419  *
1420  * @param[out] caps
1421  *   A pointer to memory filled with Rx event adapter capabilities.
1422  *
1423  * @return
1424  *   - 0: Success, driver provides Rx event adapter capabilities for the
1425  *	ethernet device.
1426  *   - <0: Error code returned by the driver function.
1427  */
1428 int
1429 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1430 				uint32_t *caps);
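
/*
 * A short sketch of querying the Rx adapter capabilities for one ethdev port,
 * e.g. to decide whether packets reach the event device through an internal
 * event port or need a service core; dev_id and eth_port_id are assumed valid.
 *
 *	uint32_t caps;
 *
 *	if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps) == 0 &&
 *	    (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
 *		printf("ethdev %d: HW transfer to the event device\n", eth_port_id);
 *
 * The other *_caps_get() functions below (timer, crypto, DMA and Tx adapter)
 * follow the same query pattern with their respective capability flags.
 */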
1431 
1432 #define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
1433 /**< This flag is set when the timer mechanism is in HW. */
1434 
1435 #define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC      (1ULL << 1)
1436 /**< This flag is set if periodic mode is supported. */
1437 
1438 /**
1439  * Retrieve the event device's timer adapter capabilities.
1440  *
1441  * @param dev_id
1442  *   The identifier of the device.
1443  *
1444  * @param[out] caps
1445  *   A pointer to memory to be filled with event timer adapter capabilities.
1446  *
1447  * @return
1448  *   - 0: Success, driver provided event timer adapter capabilities.
1449  *   - <0: Error code returned by the driver function.
1450  */
1451 int
1452 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
1453 
1454 /* Crypto adapter capability bitmap flag */
1455 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW   0x1
1456 /**< Flag indicates HW is capable of generating events in
1457  * RTE_EVENT_OP_NEW enqueue operation. Cryptodev will send
1458  * packets to the event device as new events using an internal
1459  * event port.
1460  */
1461 
1462 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD   0x2
1463 /**< Flag indicates HW is capable of generating events in
1464  * RTE_EVENT_OP_FORWARD enqueue operation. Cryptodev will send
1465  * packets to the event device as forwarded event using an
1466  * internal event port.
1467  */
1468 
1469 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND  0x4
1470 /**< Flag indicates HW is capable of mapping crypto queue pair to
1471  * event queue.
1472  */
1473 
1474 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA   0x8
1475 /**< Flag indicates HW/SW supports a mechanism to store and retrieve
1476  * the private data information along with the crypto session.
1477  */
1478 
1479 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR   0x10
1480 /**< Flag indicates HW is capable of aggregating processed
1481  * crypto operations into rte_event_vector.
1482  */
1483 
1484 /**
1485  * Retrieve the event device's crypto adapter capabilities for the
1486  * specified cryptodev device
1487  *
1488  * @param dev_id
1489  *   The identifier of the device.
1490  *
1491  * @param cdev_id
1492  *   The identifier of the cryptodev device.
1493  *
1494  * @param[out] caps
1495  *   A pointer to memory filled with event adapter capabilities.
1496  *   It is expected to be pre-allocated & initialized by caller.
1497  *
1498  * @return
1499  *   - 0: Success, driver provides event adapter capabilities for the
1500  *     cryptodev device.
1501  *   - <0: Error code returned by the driver function.
1502  */
1503 int
1504 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
1505 				  uint32_t *caps);
1506 
1507 /* DMA adapter capability bitmap flag */
1508 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
1509 /**< Flag indicates HW is capable of generating events in
1510  * RTE_EVENT_OP_NEW enqueue operation. DMADEV will send
1511  * packets to the event device as new events using an
1512  * internal event port.
1513  */
1514 
1515 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
1516 /**< Flag indicates HW is capable of generating events in
1517  * RTE_EVENT_OP_FORWARD enqueue operation. DMADEV will send
1518  * packets to the event device as forwarded event using an
1519  * internal event port.
1520  */
1521 
1522 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4
1523 /**< Flag indicates HW is capable of mapping DMA vchan to event queue. */
1524 
1525 /**
1526  * Retrieve the event device's DMA adapter capabilities for the
1527  * specified dmadev device
1528  *
1529  * @param dev_id
1530  *   The identifier of the device.
1531  *
1532  * @param dmadev_id
1533  *   The identifier of the dmadev device.
1534  *
1535  * @param[out] caps
1536  *   A pointer to memory filled with event adapter capabilities.
1537  *   It is expected to be pre-allocated & initialized by caller.
1538  *
1539  * @return
1540  *   - 0: Success, driver provides event adapter capabilities for the
1541  *     dmadev device.
1542  *   - <0: Error code returned by the driver function.
1543  *
1544  */
1545 __rte_experimental
1546 int
1547 rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);
1548 
1549 /* Ethdev Tx adapter capability bitmap flags */
1550 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT	0x1
1551 /**< This flag is set when the PMD supports a packet transmit callback
1552  */
1553 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR	0x2
1554 /**< Indicates that the Tx adapter is capable of handling event vector of
1555  * mbufs.
1556  */
1557 
1558 /**
1559  * Retrieve the event device's eth Tx adapter capabilities
1560  *
1561  * @param dev_id
1562  *   The identifier of the device.
1563  *
1564  * @param eth_port_id
1565  *   The identifier of the ethernet device.
1566  *
1567  * @param[out] caps
1568  *   A pointer to memory filled with eth Tx adapter capabilities.
1569  *
1570  * @return
1571  *   - 0: Success, driver provides eth Tx adapter capabilities.
1572  *   - <0: Error code returned by the driver function.
1573  */
1574 int
1575 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1576 				uint32_t *caps);
1577 
1578 /**
1579  * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
1580  *
1581  * If the device is configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT flag
1582  * then the application can use this function to convert a timeout value in
1583  * nanoseconds to the implementation-specific timeout value supplied to
1584  * rte_event_dequeue_burst()
1585  *
1586  * @param dev_id
1587  *   The identifier of the device.
1588  * @param ns
1589  *   Wait time in nanoseconds.
1590  * @param[out] timeout_ticks
1591  *   Value for the *timeout_ticks* parameter in rte_event_dequeue_burst()
1592  *
1593  * @return
1594  *  - 0 on success.
1595  *  - -ENOTSUP if the device doesn't support timeouts
1596  *  - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
1597  *  - other values < 0 on failure.
1598  *
1599  * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
1600  * @see rte_event_dev_configure()
1601  */
1602 int
1603 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1604 					uint64_t *timeout_ticks);
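
/*
 * A small sketch of converting a nanosecond timeout into device ticks before
 * entering the dequeue loop, assuming the device was configured with
 * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT.
 *
 *	struct rte_event events[32];
 *	uint64_t timeout_ticks = 0;
 *	uint16_t nb_rx;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &timeout_ticks) < 0)
 *		timeout_ticks = 0;	// fall back to no-wait dequeue on error
 *
 *	nb_rx = rte_event_dequeue_burst(dev_id, port_id, events,
 *					RTE_DIM(events), timeout_ticks);
 */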
1605 
1606 /**
1607  * Link multiple source event queues supplied in *queues* to the destination
1608  * event port designated by its *port_id* with associated service priority
1609  * supplied in *priorities* on the event device designated by its *dev_id*.
1610  *
1611  * The link establishment shall enable the event port *port_id* to
1612  * receive events from the specified event queue(s) supplied in *queues*.
1613  *
1614  * An event queue may link to one or more event ports.
1615  * The number of links that can be established from an event queue to an event
1616  * port is implementation defined.
1617  *
1618  * Event queue(s) to event port link establishment can be changed at runtime
1619  * without re-configuring the device to support scaling and to reduce the
1620  * latency of critical work by establishing the link with more event ports
1621  * at runtime.
1622  *
1623  * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
1624  * than or equal to one, this function links the event queues to the default
1625  * profile_id i.e. profile_id 0 of the event port.
1626  *
1627  * @param dev_id
1628  *   The identifier of the device.
1629  *
1630  * @param port_id
1631  *   Event port identifier to select the destination port to link.
1632  *
1633  * @param queues
1634  *   Points to an array of *nb_links* event queues to be linked
1635  *   to the event port.
1636  *   NULL value is allowed, in which case this function links all the configured
1637  *   event queues *nb_event_queues* which was previously supplied to
1638  *   rte_event_dev_configure() to the event port *port_id*
1639  *
1640  * @param priorities
1641  *   Points to an array of *nb_links* service priorities associated with each
1642  *   event queue link to event port.
1643  *   The priority defines the event port's servicing priority for
1644  *   event queue, which may be ignored by an implementation.
1645  *   The requested priority should be in the range of
1646  *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
1647  *   The implementation shall normalize the requested priority to
1648  *   implementation supported priority value.
1649  *   NULL value is allowed, in which case this function links the event queues
1650  *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
1651  *
1652  * @param nb_links
1653  *   The number of links to establish. This parameter is ignored if queues is
1654  *   NULL.
1655  *
1656  * @return
1657  * The number of links actually established. The return value can be less than
1658  * the value of the *nb_links* parameter when the implementation has the
1659  * limitation on specific queue to port link establishment or if invalid
1660  * parameters are specified in *queues*
1661  * If the return value is less than *nb_links*, the remaining links at the end
1662  * of link[] are not established, and the caller has to take care of them.
1663  * If the return value is less than *nb_links*, then the implementation shall
1664  * update rte_errno accordingly. Possible rte_errno values are:
1665  * (EDQUOT) Quota exceeded (the application tried to link a queue configured with
1666  *  RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
1667  * (EINVAL) Invalid parameter
1668  */
1669 int
1670 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
1671 		    const uint8_t queues[], const uint8_t priorities[],
1672 		    uint16_t nb_links);
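
/*
 * A minimal sketch of linking two event queues to a worker port, one at the
 * default priority and one serviced at the highest priority; the queue and
 * port identifiers are assumed to match an earlier device configuration.
 *
 *	const uint8_t queues[] = { 0, 1 };
 *	const uint8_t priorities[] = {
 *		RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		RTE_EVENT_DEV_PRIORITY_HIGHEST,
 *	};
 *
 *	if (rte_event_port_link(dev_id, port_id, queues, priorities,
 *				RTE_DIM(queues)) != RTE_DIM(queues))
 *		rte_panic("failed to link event queues\n");
 */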
1673 
1674 /**
1675  * Unlink multiple source event queues supplied in *queues* from the destination
1676  * event port designated by its *port_id* on the event device designated
1677  * by its *dev_id*.
1678  *
1679  * The unlink call issues an async request to disable the event port *port_id*
1680  * from receiving events from the specified event queue(s) supplied in *queues*.
1681  * Event queue(s) to event port unlink establishment can be changed at runtime
1682  * without re-configuring the device.
1683  *
1684  * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
1685  * than or equal to one, this function unlinks the event queues from the default
1686  * profile identifier i.e. profile 0 of the event port.
1687  *
1688  * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
1689  *
1690  * @param dev_id
1691  *   The identifier of the device.
1692  *
1693  * @param port_id
1694  *   Event port identifier to select the destination port to unlink.
1695  *
1696  * @param queues
1697  *   Points to an array of *nb_unlinks* event queues to be unlinked
1698  *   from the event port.
1699  *   NULL value is allowed, in which case this function unlinks all the
1700  *   event queue(s) from the event port *port_id*.
1701  *
1702  * @param nb_unlinks
1703  *   The number of unlinks to establish. This parameter is ignored if queues is
1704  *   NULL.
1705  *
1706  * @return
1707  * The number of unlinks successfully requested. The return value can be less
1708  * than the value of the *nb_unlinks* parameter when the implementation has the
1709  * limitation on specific queue to port unlink establishment or
1710  * if invalid parameters are specified.
1711  * If the return value is less than *nb_unlinks*, the remaining queues at the
1712  * end of queues[] are not unlinked, and the caller has to take care of them.
1713  * If the return value is less than *nb_unlinks*, then the implementation shall
1714  * update rte_errno accordingly. Possible rte_errno values are:
1715  * (EINVAL) Invalid parameter
1716  */
1717 int
1718 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
1719 		      uint8_t queues[], uint16_t nb_unlinks);
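
/*
 * Because unlinking may complete asynchronously, a typical sketch first
 * requests the unlink and then polls rte_event_port_unlinks_in_progress()
 * until no unlinks remain outstanding, e.g. before draining or reconfiguring
 * the port.
 *
 *	uint8_t queue_id = 1;	// assumed queue to detach from this port
 *
 *	if (rte_event_port_unlink(dev_id, port_id, &queue_id, 1) != 1)
 *		rte_panic("failed to request unlink\n");
 *
 *	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *		rte_pause();
 */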
1720 
1721 /**
1722  * Link multiple source event queues supplied in *queues* to the destination
1723  * event port designated by its *port_id* with associated profile identifier
1724  * supplied in *profile_id* with service priorities supplied in *priorities*
1725  * on the event device designated by its *dev_id*.
1726  *
1727  * If *profile_id* is set to 0, then the links created by the call `rte_event_port_link`
1728  * will be overwritten.
1729  *
1730  * Event ports by default use profile_id 0 unless it is changed using the
1731  * call ``rte_event_port_profile_switch()``.
1732  *
1733  * The link establishment shall enable the event port *port_id* to
1734  * receive events from the specified event queue(s) supplied in *queues*.
1735  *
1736  * An event queue may link to one or more event ports.
1737  * The number of links that can be established from an event queue to an event port is
1738  * implementation defined.
1739  *
1740  * Event queue(s) to event port link establishment can be changed at runtime
1741  * without re-configuring the device to support scaling and to reduce the
1742  * latency of critical work by establishing the link with more event ports
1743  * at runtime.
1744  *
1745  * @param dev_id
1746  *   The identifier of the device.
1747  *
1748  * @param port_id
1749  *   Event port identifier to select the destination port to link.
1750  *
1751  * @param queues
1752  *   Points to an array of *nb_links* event queues to be linked
1753  *   to the event port.
1754  *   NULL value is allowed, in which case this function links all the configured
1755  *   event queues *nb_event_queues* which was previously supplied to
1756  *   rte_event_dev_configure() to the event port *port_id*
1757  *
1758  * @param priorities
1759  *   Points to an array of *nb_links* service priorities associated with each
1760  *   event queue link to event port.
1761  *   The priority defines the event port's servicing priority for
1762  *   event queue, which may be ignored by an implementation.
1763  *   The requested priority should be in the range of
1764  *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
1765  *   The implementation shall normalize the requested priority to
1766  *   implementation supported priority value.
1767  *   NULL value is allowed, in which case this function links the event queues
1768  *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
1769  *
1770  * @param nb_links
1771  *   The number of links to establish. This parameter is ignored if queues is
1772  *   NULL.
1773  *
1774  * @param profile_id
1775  *   The profile identifier associated with the links between event queues and
1776  *   event port. Should be less than the max capability reported by
1777  *   ``rte_event_dev_info::max_profiles_per_port``
1778  *
1779  * @return
1780  * The number of links actually established. The return value can be less than
1781  * the value of the *nb_links* parameter when the implementation has the
1782  * limitation on specific queue to port link establishment or if invalid
1783  * parameters are specified in *queues*
1784  * If the return value is less than *nb_links*, the remaining links at the end
1785  * of link[] are not established, and the caller has to take care of them.
1786  * If the return value is less than *nb_links*, then the implementation shall
1787  * update rte_errno accordingly. Possible rte_errno values are:
1788  * (EDQUOT) Quota exceeded (the application tried to link a queue configured with
1789  *  RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
1790  * (EINVAL) Invalid parameter
1791  *
1792  */
1793 __rte_experimental
1794 int
1795 rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
1796 				 const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);
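
/*
 * A sketch of preparing an alternate link profile and later activating it from
 * the datapath with rte_event_port_profile_switch(); profile 1 is assumed to
 * be within rte_event_dev_info::max_profiles_per_port.
 *
 *	const uint8_t hi_queues[] = { 2, 3 };
 *
 *	// Profile 0 keeps the links made with rte_event_port_link();
 *	// profile 1 is populated here but stays inactive until switched to.
 *	rte_event_port_profile_links_set(dev_id, port_id, hi_queues, NULL,
 *					 RTE_DIM(hi_queues), 1);
 *
 *	// Later, e.g. on a traffic burst, the worker activates profile 1:
 *	rte_event_port_profile_switch(dev_id, port_id, 1);
 */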
1797 
1798 /**
1799  * Unlink multiple source event queues supplied in *queues* that belong to profile
1800  * designated by *profile_id* from the destination event port designated by its
1801  * *port_id* on the event device designated by its *dev_id*.
1802  *
1803  * If *profile_id* is set to 0, i.e. the default profile, then this function
1804  * will act as ``rte_event_port_unlink``.
1805  *
1806  * The unlink call issues an async request to disable the event port *port_id*
1807  * from receiving events from the specified event queue(s) supplied in *queues*.
1808  * Event queue(s) to event port unlink establishment can be changed at runtime
1809  * without re-configuring the device.
1810  *
1811  * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
1812  *
1813  * @param dev_id
1814  *   The identifier of the device.
1815  *
1816  * @param port_id
1817  *   Event port identifier to select the destination port to unlink.
1818  *
1819  * @param queues
1820  *   Points to an array of *nb_unlinks* event queues to be unlinked
1821  *   from the event port.
1822  *   NULL value is allowed, in which case this function unlinks all the
1823  *   event queue(s) from the event port *port_id*.
1824  *
1825  * @param nb_unlinks
1826  *   The number of unlinks to establish. This parameter is ignored if queues is
1827  *   NULL.
1828  *
1829  * @param profile_id
1830  *   The profile identifier associated with the links between event queues and
1831  *   event port. Should be less than the max capability reported by
1832  *   ``rte_event_dev_info::max_profiles_per_port``
1833  *
1834  * @return
1835  * The number of unlinks successfully requested. The return value can be less
1836  * than the value of the *nb_unlinks* parameter when the implementation has the
1837  * limitation on specific queue to port unlink establishment or
1838  * if invalid parameters are specified.
1839  * If the return value is less than *nb_unlinks*, the remaining queues at the
1840  * end of queues[] are not unlinked, and the caller has to take care of them.
1841  * If the return value is less than *nb_unlinks*, then the implementation shall
1842  * update rte_errno accordingly. Possible rte_errno values are:
1843  * (EINVAL) Invalid parameter
1844  *
1845  */
1846 __rte_experimental
1847 int
1848 rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
1849 			      uint16_t nb_unlinks, uint8_t profile_id);
1850 
1851 /**
1852  * Returns the number of unlinks in progress.
1853  *
1854  * This function provides the application with a method to detect when an
1855  * unlink has been completed by the implementation.
1856  *
1857  * @see rte_event_port_unlink() to issue unlink requests.
1858  *
1859  * @param dev_id
1860  *   The identifier of the device.
1861  *
1862  * @param port_id
1863  *   Event port identifier to select port to check for unlinks in progress.
1864  *
1865  * @return
1866  * The number of unlinks that are in progress. A return of zero indicates that
1867  * there are no outstanding unlink requests. A positive return value indicates
1868  * the number of unlinks that are in progress, but are not yet complete.
1869  * A negative return value indicates an error, -EINVAL indicates an invalid
1870  * parameter passed for *dev_id* or *port_id*.
1871  */
1872 int
1873 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);
1874 
1875 /**
1876  * Retrieve the list of source event queues and their associated service priorities
1877  * linked to the destination event port designated by its *port_id*
1878  * on the event device designated by its *dev_id*.
1879  *
1880  * @param dev_id
1881  *   The identifier of the device.
1882  *
1883  * @param port_id
1884  *   Event port identifier.
1885  *
1886  * @param[out] queues
1887  *   Points to an array of *queues* for output.
1888  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1889  *   store the event queue(s) linked with event port *port_id*
1890  *
1891  * @param[out] priorities
1892  *   Points to an array of *priorities* for output.
1893  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1894  *   store the service priority associated with each event queue linked
1895  *
1896  * @return
1897  * The number of links established on the event port designated by its
1898  *  *port_id*.
1899  * - <0 on failure.
1900  */
1901 int
1902 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1903 			 uint8_t queues[], uint8_t priorities[]);
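
/*
 * A short sketch of inspecting the current links of a port; the output arrays
 * are sized for the worst case as required above.
 *
 *	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
 *	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
 *	int i, nb_links;
 *
 *	nb_links = rte_event_port_links_get(dev_id, port_id, queues, priorities);
 *	for (i = 0; i < nb_links; i++)
 *		printf("port %d <- queue %d (priority %d)\n",
 *		       port_id, queues[i], priorities[i]);
 */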
1904 
1905 /**
1906  * Retrieve the list of source event queues and their service priorities
1907  * associated to a *profile_id* and linked to the destination event port
1908  * designated by its *port_id* on the event device designated by its *dev_id*.
1909  *
1910  * @param dev_id
1911  *   The identifier of the device.
1912  *
1913  * @param port_id
1914  *   Event port identifier.
1915  *
1916  * @param[out] queues
1917  *   Points to an array of *queues* for output.
1918  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1919  *   store the event queue(s) linked with event port *port_id*
1920  *
1921  * @param[out] priorities
1922  *   Points to an array of *priorities* for output.
1923  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1924  *   store the service priority associated with each event queue linked
1925  *
1926  * @param profile_id
1927  *   The profile identifier associated with the links between event queues and
1928  *   event port. Should be less than the max capability reported by
1929  *   ``rte_event_dev_info::max_profiles_per_port``
1930  *
1931  * @return
1932  * The number of links established on the event port designated by its
1933  *  *port_id*.
1934  * - <0 on failure.
1935  */
1936 __rte_experimental
1937 int
1938 rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
1939 				 uint8_t priorities[], uint8_t profile_id);
1940 
1941 /**
1942  * Retrieve the service ID of the event dev. If the event dev doesn't use
1943  * a rte_service function, this function returns -ESRCH.
1944  *
1945  * @param dev_id
1946  *   The identifier of the device.
1947  *
1948  * @param [out] service_id
1949  *   A pointer to a uint32_t, to be filled in with the service id.
1950  *
1951  * @return
1952  *   - 0: Success
1953  *   - <0: Error code on failure, if the event dev doesn't use a rte_service
1954  *   function, this function returns -ESRCH.
1955  */
1956 int
1957 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
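
/*
 * A sketch of running a software event device's scheduling service on a
 * dedicated service lcore, using the rte_service API from <rte_service.h>;
 * service_lcore_id is assumed to identify an lcore already registered as a
 * service core by the application.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, service_lcore_id, 1);
 *		rte_service_runstate_set(service_id, 1);
 *	}
 */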
1958 
1959 /**
1960  * Dump internal information about *dev_id* to the FILE* provided in *f*.
1961  *
1962  * @param dev_id
1963  *   The identifier of the device.
1964  *
1965  * @param f
1966  *   A pointer to a file for output
1967  *
1968  * @return
1969  *   - 0: on success
1970  *   - <0: on failure.
1971  */
1972 int
1973 rte_event_dev_dump(uint8_t dev_id, FILE *f);
1974 
1975 /** Maximum name length for extended statistics counters */
1976 #define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
1977 
1978 /**
1979  * Selects the component of the eventdev to retrieve statistics from.
1980  */
1981 enum rte_event_dev_xstats_mode {
1982 	RTE_EVENT_DEV_XSTATS_DEVICE,
1983 	RTE_EVENT_DEV_XSTATS_PORT,
1984 	RTE_EVENT_DEV_XSTATS_QUEUE,
1985 };
1986 
1987 /**
1988  * A name-key lookup element for extended statistics.
1989  *
1990  * This structure is used to map between names and ID numbers
1991  * for extended eventdev statistics.
1992  */
1993 struct rte_event_dev_xstats_name {
1994 	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
1995 };
1996 
1997 /**
1998  * Retrieve names of extended statistics of an event device.
1999  *
2000  * @param dev_id
2001  *   The identifier of the event device.
2002  * @param mode
2003  *   The mode of statistics to retrieve. Choices include the device statistics,
2004  *   port statistics or queue statistics.
2005  * @param queue_port_id
2006  *   Used to specify the port or queue number in queue or port mode, and is
2007  *   ignored in device mode.
2008  * @param[out] xstats_names
2009  *   Block of memory to insert names into. Must be at least size in capacity.
2010  *   If set to NULL, function returns required capacity.
2011  * @param[out] ids
2012  *   Block of memory to insert ids into. Must be at least size in capacity.
2013  *   If set to NULL, function returns required capacity. The id values returned
2014  *   can be passed to *rte_event_dev_xstats_get* to select statistics.
2015  * @param size
2016  *   Capacity of xstats_names (number of names).
2017  * @return
2018  *   - positive value lower or equal to size: success. The return value
2019  *     is the number of entries filled in the stats table.
2020  *   - positive value higher than size: error, the given statistics table
2021  *     is too small. The return value corresponds to the size that should
2022  *     be given to succeed. The entries in the table are not valid and
2023  *     shall not be used by the caller.
2024  *   - negative value on error:
2025  *        -ENODEV for invalid *dev_id*
2026  *        -EINVAL for invalid mode, queue port or id parameters
2027  *        -ENOTSUP if the device doesn't support this function.
2028  */
2029 int
2030 rte_event_dev_xstats_names_get(uint8_t dev_id,
2031 			       enum rte_event_dev_xstats_mode mode,
2032 			       uint8_t queue_port_id,
2033 			       struct rte_event_dev_xstats_name *xstats_names,
2034 			       uint64_t *ids,
2035 			       unsigned int size);
2036 
2037 /**
2038  * Retrieve extended statistics of an event device.
2039  *
2040  * @param dev_id
2041  *   The identifier of the device.
2042  * @param mode
2043  *  The mode of statistics to retrieve. Choices include the device statistics,
2044  *  port statistics or queue statistics.
2045  * @param queue_port_id
2046  *   Used to specify the port or queue number in queue or port mode, and is
2047  *   ignored in device mode.
2048  * @param ids
2049  *   The id numbers of the stats to get. The ids can be got from the stat
2050  *   position in the stat list from rte_event_dev_xstats_names_get(), or
2051  *   by using rte_event_dev_xstats_by_name_get().
2052  * @param[out] values
2053  *   The values for each stats request by ID.
2054  * @param n
2055  *   The number of stats requested
2056  * @return
2057  *   - positive value: number of stat entries filled into the values array
2058  *   - negative value on error:
2059  *        -ENODEV for invalid *dev_id*
2060  *        -EINVAL for invalid mode, queue port or id parameters
2061  *        -ENOTSUP if the device doesn't support this function.
2062  */
2063 int
2064 rte_event_dev_xstats_get(uint8_t dev_id,
2065 			 enum rte_event_dev_xstats_mode mode,
2066 			 uint8_t queue_port_id,
2067 			 const uint64_t ids[],
2068 			 uint64_t values[], unsigned int n);
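
/*
 * A sketch of dumping all device-level extended statistics: the first call
 * with NULL output arrays returns the required capacity, the arrays are then
 * allocated and the names, ids and values fetched (error handling and the
 * <stdlib.h>/<inttypes.h> includes are omitted for brevity).
 *
 *	int i, n = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *
 *	if (n > 0) {
 *		struct rte_event_dev_xstats_name *names = malloc(n * sizeof(*names));
 *		uint64_t *ids = malloc(n * sizeof(*ids));
 *		uint64_t *values = malloc(n * sizeof(*values));
 *
 *		rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
 *					       0, names, ids, n);
 *		rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
 *					 0, ids, values, n);
 *		for (i = 0; i < n; i++)
 *			printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
 *		free(names); free(ids); free(values);
 *	}
 */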
2069 
2070 /**
2071  * Retrieve the value of a single stat by requesting it by name.
2072  *
2073  * @param dev_id
2074  *   The identifier of the device
2075  * @param name
2076  *   The stat name to retrieve
2077  * @param[out] id
2078  *   If non-NULL, the numerical id of the stat will be returned, so that further
2079  *   requests for the stat can be got using rte_event_dev_xstats_get, which will
2080  *   be faster as it doesn't need to scan a list of names for the stat.
2081  *   If the stat cannot be found, the id returned will be (unsigned)-1.
2082  * @return
2083  *   - positive value or zero: the stat value
2084  *   - negative value: -EINVAL if stat not found, -ENOTSUP if not supported.
2085  */
2086 uint64_t
2087 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
2088 				 uint64_t *id);
2089 
2090 /**
2091  * Reset the values of the xstats of the selected component in the device.
2092  *
2093  * @param dev_id
2094  *   The identifier of the device
2095  * @param mode
2096  *   The mode of the statistics to reset. Choose from device, queue or port.
2097  * @param queue_port_id
2098  *   The queue or port to reset. 0 and positive values select ports and queues,
2099  *   while -1 indicates all ports or queues.
2100  * @param ids
2101  *   Selects specific statistics to be reset. When NULL, all statistics selected
2102  *   by *mode* will be reset. If non-NULL, must point to array of at least
2103  *   *nb_ids* size.
2104  * @param nb_ids
2105  *   The number of ids available from the *ids* array. Ignored when ids is NULL.
2106  * @return
2107  *   - zero: successfully reset the statistics to zero
2108  *   - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported.
2109  */
2110 int
2111 rte_event_dev_xstats_reset(uint8_t dev_id,
2112 			   enum rte_event_dev_xstats_mode mode,
2113 			   int16_t queue_port_id,
2114 			   const uint64_t ids[],
2115 			   uint32_t nb_ids);
2116 
2117 /**
2118  * Trigger the eventdev self test.
2119  *
2120  * @param dev_id
2121  *   The identifier of the device
2122  * @return
2123  *   - 0: Selftest successful
2124  *   - -ENOTSUP if the device doesn't support selftest
2125  *   - other values < 0 on failure.
2126  */
2127 int rte_event_dev_selftest(uint8_t dev_id);
2128 
2129 /**
2130  * Create a mempool that holds event vectors, with the memory required per
2131  * event vector derived from the number of elements per vector.
2132  * This mempool should be used to allocate the event vectors.
2133  *
2134  * @param name
2135  *   The name of the vector pool.
2136  * @param n
2137  *   The number of event vectors in the pool.
2138  * @param cache_size
2139  *   Size of the per-core object cache. See rte_mempool_create() for
2140  *   details.
2141  * @param nb_elem
2142  *   The number of elements that a single event vector should be able to hold.
2143  * @param socket_id
2144  *   The socket identifier where the memory should be allocated. The
2145  *   value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
2146  *   reserved zone
2147  *
2148  * @return
2149  *   The pointer to the newly allocated mempool, on success. NULL on error
2150  *   with rte_errno set appropriately. Possible rte_errno values include:
2151  *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
2152  *    - E_RTE_SECONDARY - function was called from a secondary process instance
2153  *    - EINVAL - cache size provided is too large, or priv_size is not aligned.
2154  *    - ENOSPC - the maximum number of memzones has already been allocated
2155  *    - EEXIST - a memzone with the same name already exists
2156  *    - ENOMEM - no appropriate memory area found in which to create memzone
2157  *    - ENAMETOOLONG - mempool name requested is too long.
2158  */
2159 struct rte_mempool *
2160 rte_event_vector_pool_create(const char *name, unsigned int n,
2161 			     unsigned int cache_size, uint16_t nb_elem,
2162 			     int socket_id);
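
/*
 * A minimal sketch of creating a pool of event vectors, each able to hold up
 * to 32 elements; the pool size and cache size used here are illustrative.
 *
 *	struct rte_mempool *vec_pool;
 *
 *	vec_pool = rte_event_vector_pool_create("evt_vec_pool", 16384, 256,
 *						32, rte_socket_id());
 *	if (vec_pool == NULL)
 *		rte_panic("cannot create event vector pool: %d\n", rte_errno);
 */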
2163 
2164 #include <rte_eventdev_core.h>
2165 
2166 static __rte_always_inline uint16_t
2167 __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2168 			  const struct rte_event ev[], uint16_t nb_events,
2169 			  const event_enqueue_burst_t fn)
2170 {
2171 	const struct rte_event_fp_ops *fp_ops;
2172 	void *port;
2173 
2174 	fp_ops = &rte_event_fp_ops[dev_id];
2175 	port = fp_ops->data[port_id];
2176 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2177 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2178 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2179 		rte_errno = EINVAL;
2180 		return 0;
2181 	}
2182 
2183 	if (port == NULL) {
2184 		rte_errno = EINVAL;
2185 		return 0;
2186 	}
2187 #endif
2188 	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
2189 	/*
2190 	 * Allow zero-cost invocation of the non-burst routine if the application
2191 	 * requests nb_events as a compile-time constant of one.
2192 	 */
2193 	if (nb_events == 1)
2194 		return (fp_ops->enqueue)(port, ev);
2195 	else
2196 		return fn(port, ev, nb_events);
2197 }
2198 
2199 /**
2200  * Enqueue a burst of event objects or a single event object supplied in *rte_event*
2201  * structure on an  event device designated by its *dev_id* through the event
2202  * port specified by *port_id*. Each event object specifies the event queue on
2203  * which it will be enqueued.
2204  *
2205  * The *nb_events* parameter is the number of event objects to enqueue which are
2206  * supplied in the *ev* array of *rte_event* structure.
2207  *
2208  * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
2209  * enqueued to the same port that their associated events were dequeued from.
2210  *
2211  * The rte_event_enqueue_burst() function returns the number of
2212  * event objects it actually enqueued. A return value equal to *nb_events*
2213  * means that all event objects have been enqueued.
2214  *
2215  * @param dev_id
2216  *   The identifier of the device.
2217  * @param port_id
2218  *   The identifier of the event port.
2219  * @param ev
2220  *   Points to an array of *nb_events* objects of type *rte_event* structure
2221  *   which contain the event object enqueue operations to be processed.
2222  * @param nb_events
2223  *   The number of event objects to enqueue, typically number of
2224  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2225  *   available for this port.
2226  *
2227  * @return
2228  *   The number of event objects actually enqueued on the event device. The
2229  *   return value can be less than the value of the *nb_events* parameter when
2230  *   the event devices queue is full or if invalid parameters are specified in a
2231  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2232  *   events at the end of ev[] are not consumed and the caller has to take care
2233  *   of them, and rte_errno is set accordingly. Possible errno values include:
2234  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2235  *              ID is invalid, or an event's sched type doesn't match the
2236  *              capabilities of the destination queue.
2237  *   - ENOSPC   The event port was backpressured and unable to enqueue
2238  *              one or more events. This error code is only applicable to
2239  *              closed systems.
2240  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2241  */
2242 static inline uint16_t
2243 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2244 			const struct rte_event ev[], uint16_t nb_events)
2245 {
2246 	const struct rte_event_fp_ops *fp_ops;
2247 
2248 	fp_ops = &rte_event_fp_ops[dev_id];
2249 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2250 					 fp_ops->enqueue_burst);
2251 }
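
/*
 * A sketch of enqueuing a burst and retrying any events the device did not
 * accept, for example when a closed system back-pressures the port; dev_id,
 * port_id, events[] and nb_events are assumed to be set up by the caller.
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < nb_events)
 *		sent += rte_event_enqueue_burst(dev_id, port_id,
 *						&events[sent], nb_events - sent);
 *
 * Depending on the cause reported in rte_errno, an application may instead
 * drop or buffer the events that were not accepted.
 */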
2252 
2253 /**
2254  * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_NEW* on
2255  * an event device designated by its *dev_id* through the event port specified
2256  * by *port_id*.
2257  *
2258  * Provides the same functionality as rte_event_enqueue_burst(), except that
2259  * the application can use this API when all objects in the burst contain
2260  * enqueue operations of the type *RTE_EVENT_OP_NEW*. This specialized
2261  * function can provide an additional hint to the PMD and optimize if possible.
2262  *
2263  * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
2264  * has an event object of operation type != RTE_EVENT_OP_NEW.
2265  *
2266  * @param dev_id
2267  *   The identifier of the device.
2268  * @param port_id
2269  *   The identifier of the event port.
2270  * @param ev
2271  *   Points to an array of *nb_events* objects of type *rte_event* structure
2272  *   which contain the event object enqueue operations to be processed.
2273  * @param nb_events
2274  *   The number of event objects to enqueue, typically number of
2275  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2276  *   available for this port.
2277  *
2278  * @return
2279  *   The number of event objects actually enqueued on the event device. The
2280  *   return value can be less than the value of the *nb_events* parameter when
2281  *   the event devices queue is full or if invalid parameters are specified in a
2282  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2283  *   events at the end of ev[] are not consumed and the caller has to take care
2284  *   of them, and rte_errno is set accordingly. Possible errno values include:
2285  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2286  *              ID is invalid, or an event's sched type doesn't match the
2287  *              capabilities of the destination queue.
2288  *   - ENOSPC   The event port was backpressured and unable to enqueue
2289  *              one or more events. This error code is only applicable to
2290  *              closed systems.
2291  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2292  * @see rte_event_enqueue_burst()
2293  */
2294 static inline uint16_t
2295 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
2296 			    const struct rte_event ev[], uint16_t nb_events)
2297 {
2298 	const struct rte_event_fp_ops *fp_ops;
2299 
2300 	fp_ops = &rte_event_fp_ops[dev_id];
2301 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2302 					 fp_ops->enqueue_new_burst);
2303 }
2304 
2305 /**
2306  * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_FORWARD*
2307  * on an event device designated by its *dev_id* through the event port
2308  * specified by *port_id*.
2309  *
2310  * Provides the same functionality as rte_event_enqueue_burst(), except that
2311  * the application can use this API when all objects in the burst contain
2312  * enqueue operations of the type *RTE_EVENT_OP_FORWARD*. This specialized
2313  * function can provide an additional hint to the PMD and optimize if possible.
2314  *
2315  * The rte_event_enqueue_forward_burst() result is undefined if the enqueue burst
2316  * has an event object of operation type != RTE_EVENT_OP_FORWARD.
2317  *
2318  * @param dev_id
2319  *   The identifier of the device.
2320  * @param port_id
2321  *   The identifier of the event port.
2322  * @param ev
2323  *   Points to an array of *nb_events* objects of type *rte_event* structure
2324  *   which contain the event object enqueue operations to be processed.
2325  * @param nb_events
2326  *   The number of event objects to enqueue, typically number of
2327  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2328  *   available for this port.
2329  *
2330  * @return
2331  *   The number of event objects actually enqueued on the event device. The
2332  *   return value can be less than the value of the *nb_events* parameter when
2333  *   the event devices queue is full or if invalid parameters are specified in a
2334  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2335  *   events at the end of ev[] are not consumed and the caller has to take care
2336  *   of them, and rte_errno is set accordingly. Possible errno values include:
2337  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2338  *              ID is invalid, or an event's sched type doesn't match the
2339  *              capabilities of the destination queue.
2340  *   - ENOSPC   The event port was backpressured and unable to enqueue
2341  *              one or more events. This error code is only applicable to
2342  *              closed systems.
2343  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2344  * @see rte_event_enqueue_burst()
2345  */
2346 static inline uint16_t
2347 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
2348 				const struct rte_event ev[], uint16_t nb_events)
2349 {
2350 	const struct rte_event_fp_ops *fp_ops;
2351 
2352 	fp_ops = &rte_event_fp_ops[dev_id];
2353 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2354 					 fp_ops->enqueue_forward_burst);
2355 }
2356 
2357 /**
2358  * Dequeue a burst of event objects or a single event object from the event port
2359  * designated by its *event_port_id*, on an event device designated
2360  * by its *dev_id*.
2361  *
2362  * rte_event_dequeue_burst() does not dictate the specifics of scheduling
2363  * algorithm as each eventdev driver may have different criteria to schedule
2364  * an event. However, in general, from an application perspective the scheduler may
2365  * use the following scheme to dispatch an event to the port.
2366  *
2367  * 1) Selection of event queue based on
2368  *   a) The list of event queues linked to the event port.
2369  *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability, then event
2370  *   queue selection from the list is based on event queue priority relative to
2371  *   other event queues, supplied as *priority* in rte_event_queue_setup().
2372  *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
2373  *   queue selection from the list is based on event priority supplied as
2374  *   *priority* in rte_event_enqueue_burst()
2375  * 2) Selection of event
2376  *   a) The number of flows available in selected event queue.
2377  *   b) Schedule type method associated with the event
2378  *
2379  * The *nb_events* parameter is the maximum number of event objects to dequeue
2380  * which are returned in the *ev* array of *rte_event* structure.
2381  *
2382  * The rte_event_dequeue_burst() function returns the number of event objects
2383  * it actually dequeued. A return value equal to *nb_events* means that all
2384  * event objects have been dequeued.
2385  *
2386  * The number of events dequeued is the number of scheduler contexts held by
2387  * this port. These contexts are automatically released in the next
2388  * rte_event_dequeue_burst() invocation if the port supports implicit
2389  * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
2390  * operation can be used to release the contexts early.
2391  *
2392  * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
2393  * enqueued to the same port that their associated events were dequeued from.
2394  *
2395  * @param dev_id
2396  *   The identifier of the device.
2397  * @param port_id
2398  *   The identifier of the event port.
2399  * @param[out] ev
2400  *   Points to an array of *nb_events* objects of type *rte_event* structure
2401  *   for output to be populated with the dequeued event objects.
2402  * @param nb_events
2403  *   The maximum number of event objects to dequeue, typically number of
2404  *   rte_event_port_dequeue_depth() available for this port.
2405  *
2406  * @param timeout_ticks
2407  *   - 0 no-wait, returns immediately if there is no event.
2408  *   - >0 wait for the event. If the device is configured with
2409  *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT, then this function will wait until
2410  *   at least one event is available or *timeout_ticks* time has elapsed.
2411  *   If the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT,
2412  *   then this function will wait until an event is available or the
2413  *   *dequeue_timeout_ns* ns which was previously supplied to
2414  *   rte_event_dev_configure() has elapsed.
2415  *
2416  * @return
2417  * The number of event objects actually dequeued from the port. The return
2418  * value can be less than the value of the *nb_events* parameter when the
2419  * event port's queue is not full.
2420  *
2421  * @see rte_event_port_dequeue_depth()
2422  */
2423 static inline uint16_t
2424 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
2425 			uint16_t nb_events, uint64_t timeout_ticks)
2426 {
2427 	const struct rte_event_fp_ops *fp_ops;
2428 	void *port;
2429 
2430 	fp_ops = &rte_event_fp_ops[dev_id];
2431 	port = fp_ops->data[port_id];
2432 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2433 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2434 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2435 		rte_errno = EINVAL;
2436 		return 0;
2437 	}
2438 
2439 	if (port == NULL) {
2440 		rte_errno = EINVAL;
2441 		return 0;
2442 	}
2443 #endif
2444 	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
2445 	/*
2446 	 * Allow zero-cost invocation of the non-burst routine if the application
2447 	 * requests nb_events as a compile-time constant of one.
2448 	 */
2449 	if (nb_events == 1)
2450 		return (fp_ops->dequeue)(port, ev, timeout_ticks);
2451 	else
2452 		return (fp_ops->dequeue_burst)(port, ev, nb_events,
2453 					       timeout_ticks);
2454 }
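
/*
 * A compact sketch of the usual worker loop built on top of this function:
 * events are dequeued, processed, retargeted to the next pipeline stage and
 * enqueued back as forwarded events on the same port. The *done* flag,
 * *timeout_ticks*, *next_queue_id* and process_event() are application-defined.
 *
 *	struct rte_event ev[32];
 *	uint16_t i, nb;
 *
 *	while (!done) {
 *		nb = rte_event_dequeue_burst(dev_id, port_id, ev,
 *					     RTE_DIM(ev), timeout_ticks);
 *		for (i = 0; i < nb; i++) {
 *			process_event(&ev[i]);	// hypothetical application helper
 *			ev[i].op = RTE_EVENT_OP_FORWARD;
 *			ev[i].queue_id = next_queue_id;
 *		}
 *		if (nb != 0)
 *			rte_event_enqueue_burst(dev_id, port_id, ev, nb);
 *	}
 */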
2455 
2456 #define RTE_EVENT_DEV_MAINT_OP_FLUSH          (1 << 0)
2457 /**< Force an immediate flush of any buffered events in the port,
2458  * potentially at the cost of additional overhead.
2459  *
2460  * @see rte_event_maintain()
2461  */
2462 
2463 /**
2464  * Maintain an event device.
2465  *
2466  * This function is only relevant for event devices which do not have
2467  * the @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE flag set. Such devices
2468  * require an application thread using a particular port to
2469  * periodically call rte_event_maintain() on that port during periods
2470  * which it is neither attempting to enqueue events to nor dequeue
2471  * events from the port. rte_event_maintain() is a low-overhead
2472  * function and should be called at a high rate (e.g., in the
2473  * application's poll loop).
2474  *
2475  * No port may be left unmaintained.
2476  *
2477  * At the application thread's convenience, rte_event_maintain() may
2478  * (but is not required to) be called even during periods when enqueue
2479  * or dequeue functions are being called, at the cost of a slight
2480  * increase in overhead.
2481  *
2482  * rte_event_maintain() may be called on event devices which have set
2483  * @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, in which case it is a
2484  * no-operation.
2485  *
2486  * @param dev_id
2487  *   The identifier of the device.
2488  * @param port_id
2489  *   The identifier of the event port.
2490  * @param op
2491  *   0, or @ref RTE_EVENT_DEV_MAINT_OP_FLUSH.
2492  * @return
2493  *  - 0 on success.
2494  *  - -EINVAL if *dev_id*,  *port_id*, or *op* is invalid.
2495  *
2496  * @see RTE_EVENT_DEV_CAP_MAINTENANCE_FREE
2497  */
2498 static inline int
2499 rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
2500 {
2501 	const struct rte_event_fp_ops *fp_ops;
2502 	void *port;
2503 
2504 	fp_ops = &rte_event_fp_ops[dev_id];
2505 	port = fp_ops->data[port_id];
2506 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2507 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2508 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
2509 		return -EINVAL;
2510 
2511 	if (port == NULL)
2512 		return -EINVAL;
2513 
2514 	if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
2515 		return -EINVAL;
2516 #endif
2517 	rte_eventdev_trace_maintain(dev_id, port_id, op);
2518 
2519 	if (fp_ops->maintain != NULL)
2520 		fp_ops->maintain(port, op);
2521 
2522 	return 0;
2523 }
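
/*
 * A sketch of keeping a port maintained from the application's poll loop while
 * the worker is otherwise idle; this is only needed on devices without the
 * RTE_EVENT_DEV_CAP_MAINTENANCE_FREE capability.
 *
 *	struct rte_event ev[32];
 *	uint16_t nb;
 *
 *	nb = rte_event_dequeue_burst(dev_id, port_id, ev, RTE_DIM(ev), 0);
 *	if (nb == 0)
 *		rte_event_maintain(dev_id, port_id, 0);
 */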
2524 
2525 /**
2526  * Change the active profile on an event port.
2527  *
2528  * This function is used to change the current active profile on an event port
2529  * when multiple link profiles are configured on an event port through the
2530  * function call ``rte_event_port_profile_links_set``.
2531  *
2532  * On the subsequent ``rte_event_dequeue_burst`` call, only the event queues
2533  * that were associated with the newly active profile will participate in
2534  * scheduling.
2535  *
2536  * @param dev_id
2537  *   The identifier of the device.
2538  * @param port_id
2539  *   The identifier of the event port.
2540  * @param profile_id
2541  *   The identifier of the profile.
2542  * @return
2543  *  - 0 on success.
2544  *  - -EINVAL if *dev_id*,  *port_id*, or *profile_id* is invalid.
2545  */
2546 static inline uint8_t
2547 rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
2548 {
2549 	const struct rte_event_fp_ops *fp_ops;
2550 	void *port;
2551 
2552 	fp_ops = &rte_event_fp_ops[dev_id];
2553 	port = fp_ops->data[port_id];
2554 
2555 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2556 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2557 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
2558 		return -EINVAL;
2559 
2560 	if (port == NULL)
2561 		return -EINVAL;
2562 
2563 	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
2564 		return -EINVAL;
2565 #endif
2566 	rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);
2567 
2568 	return fp_ops->profile_switch(port, profile_id);
2569 }
2570 
2571 #ifdef __cplusplus
2572 }
2573 #endif
2574 
2575 #endif /* _RTE_EVENTDEV_H_ */
2576