xref: /dpdk/lib/eventdev/rte_eventdev.h (revision d029f35384d0844e9aeb5dbc46fbe1b063d649f7)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc.
3  * Copyright(c) 2016-2018 Intel Corporation.
4  * Copyright 2016 NXP
5  * All rights reserved.
6  */
7 
8 #ifndef _RTE_EVENTDEV_H_
9 #define _RTE_EVENTDEV_H_
10 
11 /**
12  * @file
13  *
14  * RTE Event Device API
15  *
16  * In a polling model, lcores poll ethdev ports and associated Rx queues
17  * directly to look for packets. In an event driven model, by contrast, lcores
18  * call the scheduler that selects packets for them based on programmer
19  * specified criteria. The eventdev library adds support for an event driven
20  * programming model, which offers applications automatic multicore scaling,
21  * dynamic load balancing, pipelining, packet ingress order maintenance and
22  * synchronization services to simplify application packet processing.
23  *
24  * The Event Device API is composed of two parts:
25  *
26  * - The application-oriented Event API that includes functions to setup
27  *   an event device (configure it, setup its queues, ports and start it), to
28  *   establish the link between queues to port and to receive events, and so on.
29  *
30  * - The driver-oriented Event API that exports a function allowing
31  *   an event Poll Mode Driver (PMD) to register itself as
32  *   an event device driver.
33  *
34  * Event device components:
35  *
36  *                     +-----------------+
37  *                     | +-------------+ |
38  *        +-------+    | |    flow 0   | |
39  *        |Packet |    | +-------------+ |
40  *        |event  |    | +-------------+ |
41  *        |       |    | |    flow 1   | |port_link(port0, queue0)
42  *        +-------+    | +-------------+ |     |     +--------+
43  *        +-------+    | +-------------+ o-----v-----o        |dequeue +------+
44  *        |Crypto |    | |    flow n   | |           | event  +------->|Core 0|
45  *        |work   |    | +-------------+ o----+      | port 0 |        |      |
46  *        |done ev|    |  event queue 0  |    |      +--------+        +------+
47  *        +-------+    +-----------------+    |
48  *        +-------+                           |
49  *        |Timer  |    +-----------------+    |      +--------+
50  *        |expiry |    | +-------------+ |    +------o        |dequeue +------+
51  *        |event  |    | |    flow 0   | o-----------o event  +------->|Core 1|
52  *        +-------+    | +-------------+ |      +----o port 1 |        |      |
53  *       Event enqueue | +-------------+ |      |    +--------+        +------+
54  *     o-------------> | |    flow 1   | |      |
55  *        enqueue(     | +-------------+ |      |
56  *        queue_id,    |                 |      |    +--------+        +------+
57  *        flow_id,     | +-------------+ |      |    |        |dequeue |Core 2|
58  *        sched_type,  | |    flow n   | o-----------o event  +------->|      |
59  *        event_type,  | +-------------+ |      |    | port 2 |        +------+
60  *        subev_type,  |  event queue 1  |      |    +--------+
61  *        event)       +-----------------+      |    +--------+
62  *                                              |    |        |dequeue +------+
63  *        +-------+    +-----------------+      |    | event  +------->|Core n|
64  *        |Core   |    | +-------------+ o-----------o port n |        |      |
65  *        |(SW)   |    | |    flow 0   | |      |    +--------+        +--+---+
66  *        |event  |    | +-------------+ |      |                         |
67  *        +-------+    | +-------------+ |      |                         |
68  *            ^        | |    flow 1   | |      |                         |
69  *            |        | +-------------+ o------+                         |
70  *            |        | +-------------+ |                                |
71  *            |        | |    flow n   | |                                |
72  *            |        | +-------------+ |                                |
73  *            |        |  event queue n  |                                |
74  *            |        +-----------------+                                |
75  *            |                                                           |
76  *            +-----------------------------------------------------------+
77  *
78  * Event device: A hardware or software-based event scheduler.
79  *
80  * Event: A unit of scheduling that encapsulates a packet or other datatype,
81  * such as an SW generated event from the CPU, a crypto work completion
82  * notification, a timer expiry event notification etc., as well as metadata.
83  * The metadata includes flow ID, scheduling type, event priority, event_type,
84  * sub_event_type etc.
85  *
86  * Event queue: A queue containing events that are scheduled by the event dev.
87  * An event queue contains events of different flows associated with scheduling
88  * types, such as atomic, ordered, or parallel.
89  *
90  * Event port: An application's interface into the event dev for enqueue and
91  * dequeue operations. Each event port can be linked with one or more
92  * event queues for dequeue operations.
93  *
94  * By default, all the functions of the Event Device API exported by a PMD
95  * are lock-free functions which assume they are not invoked in parallel on
96  * different logical cores to work on the same target object. For instance,
97  * the dequeue function of a PMD cannot be invoked in parallel on two logical
98  * cores to operate on the same event port. Of course, this function
99  * can be invoked in parallel by different logical cores on different ports.
100  * It is the responsibility of the upper level application to enforce this rule.
101  *
102  * In all functions of the Event API, the Event device is
103  * designated by an integer >= 0 named the device identifier *dev_id*
104  *
105  * At the Event driver level, Event devices are represented by a generic
106  * data structure of type *rte_event_dev*.
107  *
108  * Event devices are dynamically registered during the PCI/SoC device probing
109  * phase performed at EAL initialization time.
110  * When an Event device is being probed, a *rte_event_dev* structure and
111  * a new device identifier are allocated for that device. Then, the
112  * event_dev_init() function supplied by the Event driver matching the probed
113  * device is invoked to properly initialize the device.
114  *
115  * The role of the device init function consists of resetting the hardware or
116  * software event driver implementations.
117  *
118  * If the device init operation is successful, the correspondence between
119  * the device identifier assigned to the new device and its associated
120  * *rte_event_dev* structure is effectively registered.
121  * Otherwise, both the *rte_event_dev* structure and the device identifier are
122  * freed.
123  *
124  * The functions exported by the application Event API to setup a device
125  * designated by its device identifier must be invoked in the following order:
126  *     - rte_event_dev_configure()
127  *     - rte_event_queue_setup()
128  *     - rte_event_port_setup()
129  *     - rte_event_port_link()
130  *     - rte_event_dev_start()
131  *
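 * As an illustration only, a minimal setup sequence for a single queue and a
 * single port might look like the sketch below. The values are placeholders
 * chosen for brevity (-1 for nb_events_limit is only valid for an *open
 * system* device); a real application should derive them from
 * rte_event_dev_info_get() and check every return value.
 * \code{.c}
 *	struct rte_event_dev_config dev_conf = {
 *		.nb_event_queues = 1,
 *		.nb_event_ports = 1,
 *		.nb_events_limit = -1,
 *		.nb_event_queue_flows = 1024,
 *		.nb_event_port_dequeue_depth = 16,
 *		.nb_event_port_enqueue_depth = 16,
 *	};
 *	uint8_t queue_id = 0, port_id = 0;
 *
 *	rte_event_dev_configure(dev_id, &dev_conf);
 *	rte_event_queue_setup(dev_id, queue_id, NULL);
 *	rte_event_port_setup(dev_id, port_id, NULL);
 *	rte_event_port_link(dev_id, port_id, &queue_id, NULL, 1);
 *	rte_event_dev_start(dev_id);
 * \endcode
 *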
132  * Then, the application can invoke, in any order, the functions
133  * exported by the Event API to schedule events, dequeue events, enqueue events,
134  * establish or remove event queue to event port links, and so on.
135  *
136  * An application may use rte_event_[queue/port]_default_conf_get() to get the
137  * default configuration to set up an event queue or event port by
138  * overriding a few default values.
139  *
140  * If the application wants to change the configuration (i.e. call
141  * rte_event_dev_configure(), rte_event_queue_setup(), or
142  * rte_event_port_setup()), it must call rte_event_dev_stop() first to stop the
143  * device and then do the reconfiguration before calling rte_event_dev_start()
144  * again. The schedule, enqueue and dequeue functions should not be invoked
145  * when the device is stopped.
146  *
147  * Finally, an application can close an Event device by invoking the
148  * rte_event_dev_close() function.
149  *
150  * Each function of the application Event API invokes a specific function
151  * of the PMD that controls the target device designated by its device
152  * identifier.
153  *
154  * For this purpose, all device-specific functions of an Event driver are
155  * supplied through a set of pointers contained in a generic structure of type
156  * *event_dev_ops*.
157  * The address of the *event_dev_ops* structure is stored in the *rte_event_dev*
158  * structure by the device init function of the Event driver, which is
159  * invoked during the PCI/SoC device probing phase, as explained earlier.
160  *
161  * In other words, each function of the Event API simply retrieves the
162  * *rte_event_dev* structure associated with the device identifier and
163  * performs an indirect invocation of the corresponding driver function
164  * supplied in the *event_dev_ops* structure of the *rte_event_dev* structure.
165  *
166  * For performance reasons, the address of the fast-path functions of the
167  * Event driver is not contained in the *event_dev_ops* structure.
168  * Instead, they are directly stored at the beginning of the *rte_event_dev*
169  * structure to avoid an extra indirect memory access during their invocation.
170  *
171  * RTE event device drivers do not use interrupts for enqueue or dequeue
172  * operation. Instead, Event drivers export Poll-Mode enqueue and dequeue
173  * functions to applications.
174  *
175  * The events are injected into the event device through the *enqueue* operation
176  * by event producers in the system. The typical event producers are the ethdev
177  * subsystem for generating packet events, the CPU (SW) for generating events
178  * based on different stages of application processing, the cryptodev for
179  * generating crypto work completion notifications, etc.
180  *
181  * The *dequeue* operation gets one or more events from the event ports.
182  * The application processes the events and, if it is an intermediate stage of
183  * event processing, sends them to the downstream event queue through
184  * rte_event_enqueue_burst(); at the final stage, the application may use the
185  * Tx adapter API to maintain the ingress order and then send the packet/event on the wire.
186  *
187  * The point at which events are scheduled to ports depends on the device.
188  * For hardware devices, scheduling occurs asynchronously without any software
189  * intervention. Software schedulers can either be distributed
190  * (each worker thread schedules events to its own port) or centralized
191  * (a dedicated thread schedules to all ports). Distributed software schedulers
192  * perform the scheduling in rte_event_dequeue_burst(), whereas centralized
193  * scheduler logic needs a dedicated service core for scheduling.
194  * If the RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag is not set, the
195  * device is centralized and thus needs a dedicated scheduling
196  * thread that repeatedly calls the software specific scheduling function.
197  *
198  * An event driven worker thread has following typical workflow on fastpath:
199  * \code{.c}
200  *	while (1) {
201  *		rte_event_dequeue_burst(...);
202  *		(event processing)
203  *		rte_event_enqueue_burst(...);
204  *	}
205  * \endcode
206  */
207 
208 #ifdef __cplusplus
209 extern "C" {
210 #endif
211 
212 #include <rte_compat.h>
213 #include <rte_common.h>
214 #include <rte_errno.h>
215 #include <rte_mbuf_pool_ops.h>
216 #include <rte_mempool.h>
217 
218 #include "rte_eventdev_trace_fp.h"
219 
220 struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
221 struct rte_event;
222 
223 /* Event device capability bitmap flags */
224 #define RTE_EVENT_DEV_CAP_QUEUE_QOS           (1ULL << 0)
225 /**< Event scheduling prioritization is based on the priority and weight
226  * associated with each event queue. Events from a queue with the highest
227  * priority are scheduled first. If the queues are of the same priority, the
228  * weights of the queues are considered to select a queue in a weighted round
229  * robin fashion. Subsequent dequeue calls from an event port could see events
230  * from the same event queue, if the queue is configured with an affinity
231  * count. Affinity count is the number of subsequent dequeue calls in which an
232  * event port should use the same event queue if the queue is non-empty.
233  *
234  *  @see rte_event_queue_setup(), rte_event_queue_attr_set()
235  */
236 #define RTE_EVENT_DEV_CAP_EVENT_QOS           (1ULL << 1)
237 /**< Event scheduling prioritization is based on the priority associated with
238  *  each event. Priority of each event is supplied in *rte_event* structure
239  *  on each enqueue operation.
240  *
241  *  @see rte_event_enqueue_burst()
242  */
243 #define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED   (1ULL << 2)
244 /**< Event device operates in distributed scheduling mode.
245  * In distributed scheduling mode, event scheduling happens in HW,
246  * in rte_event_dequeue_burst(), or in a combination of the two.
247  * If the flag is not set, the eventdev is centralized and thus needs a
248  * dedicated service core that acts as a scheduling thread.
249  *
250  * @see rte_event_dequeue_burst()
251  */
252 #define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES     (1ULL << 3)
253 /**< Event device is capable of enqueuing events of any type to any queue.
254  * If this capability is not set, the queue only supports events of the
255  *  *RTE_SCHED_TYPE_* type that it was created with.
256  *
257  * @see RTE_SCHED_TYPE_* values
258  */
259 #define RTE_EVENT_DEV_CAP_BURST_MODE          (1ULL << 4)
260 /**< Event device is capable of operating in burst mode for enqueue (forward,
261  * release) and dequeue operations. If this capability is not set, the
262  * application still uses rte_event_dequeue_burst() and
263  * rte_event_enqueue_burst() but the PMD accepts only one event at a time.
264  *
265  * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
266  */
267 #define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE    (1ULL << 5)
268 /**< Event device ports support disabling the implicit release feature, in
269  * which the port will release all unreleased events in its dequeue operation.
270  * If this capability is set and the port is configured with implicit release
271  * disabled, the application is responsible for explicitly releasing events
272  * using either the RTE_EVENT_OP_FORWARD or the RTE_EVENT_OP_RELEASE event
273  * enqueue operations.
274  *
275  * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
276  */
277 
278 #define RTE_EVENT_DEV_CAP_NONSEQ_MODE         (1ULL << 6)
279 /**< Event device is capable of operating in non-sequential mode. The path
280  * of the event need not be sequential. The application can change
281  * the path of an event at runtime. If the flag is not set, then each event
282  * will follow a path from queue 0 to queue 1 to queue 2 etc. If the flag is
283  * set, events may be sent to queues in any order. If the flag is not set, the
284  * eventdev will return an error when the application enqueues an event for a
285  * qid which is not the next in the sequence.
286  */
287 
288 #define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK   (1ULL << 7)
289 /**< Event device is capable of configuring the queue/port link at runtime.
290  * If the flag is not set, the eventdev queue/port link can only be
291  * configured during initialization.
292  */
293 
294 #define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
295 /**< Event device is capable of setting up links between multiple queues
296  * and a single port. If the flag is not set, the eventdev can only map a
297  * single queue to each port or map a single queue to many ports.
298  */
299 
300 #define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
301 /**< Event device preserves the flow ID from the enqueued
302  * event to the dequeued event if the flag is set. Otherwise,
303  * the content of this field is implementation dependent.
304  */
305 
306 #define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)
307 /**< Event device *does not* require calls to rte_event_maintain().
308  * An event device that does not set this flag requires calls to
309  * rte_event_maintain() during periods when neither
310  * rte_event_dequeue_burst() nor rte_event_enqueue_burst() are called
311  * on a port. This will allow the event device to perform internal
312  * processing, such as flushing buffered events, returning credits to a
313  * global pool, or processing signaling related to load balancing.
314  */
315 
316 #define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11)
317 /**< Event device is capable of changing the queue attributes at runtime, i.e.
318  * after the rte_event_queue_setup() or rte_event_dev_start() call sequence. If this
319  * flag is not set, eventdev queue attributes can only be configured during
320  * rte_event_queue_setup().
321  */
322 
323 #define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12)
324 /**< Event device is capable of supporting multiple link profiles per event port
325  * i.e., the value of `rte_event_dev_info::max_profiles_per_port` is greater
326  * than one.
327  */
328 
329 /* Event device priority levels */
330 #define RTE_EVENT_DEV_PRIORITY_HIGHEST   0
331 /**< Highest priority expressed across eventdev subsystem
332  * @see rte_event_queue_setup(), rte_event_enqueue_burst()
333  * @see rte_event_port_link()
334  */
335 #define RTE_EVENT_DEV_PRIORITY_NORMAL    128
336 /**< Normal priority expressed across eventdev subsystem
337  * @see rte_event_queue_setup(), rte_event_enqueue_burst()
338  * @see rte_event_port_link()
339  */
340 #define RTE_EVENT_DEV_PRIORITY_LOWEST    255
341 /**< Lowest priority expressed across eventdev subsystem
342  * @see rte_event_queue_setup(), rte_event_enqueue_burst()
343  * @see rte_event_port_link()
344  */
345 
346 /* Event queue scheduling weights */
347 #define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255
348 /**< Highest weight of an event queue
349  * @see rte_event_queue_attr_get(), rte_event_queue_attr_set()
350  */
351 #define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0
352 /**< Lowest weight of an event queue
353  * @see rte_event_queue_attr_get(), rte_event_queue_attr_set()
354  */
355 
356 /* Event queue scheduling affinity */
357 #define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255
358 /**< Highest scheduling affinity of an event queue
359  * @see rte_event_queue_attr_get(), rte_event_queue_attr_set()
360  */
361 #define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0
362 /**< Lowest scheduling affinity of an event queue
363  * @see rte_event_queue_attr_get(), rte_event_queue_attr_set()
364  */
365 
366 /**
367  * Get the total number of event devices that have been successfully
368  * initialised.
369  *
370  * @return
371  *   The total number of usable event devices.
372  */
373 uint8_t
374 rte_event_dev_count(void);
375 
376 /**
377  * Get the device identifier for the named event device.
378  *
379  * @param name
380  *   Event device name to select the event device identifier.
381  *
382  * @return
383  *   Returns event device identifier on success.
384  *   - <0: Failure to find named event device.
385  */
386 int
387 rte_event_dev_get_dev_id(const char *name);
388 
389 /**
390  * Return the NUMA socket to which a device is connected.
391  *
392  * @param dev_id
393  *   The identifier of the device.
394  * @return
395  *   The NUMA socket id to which the device is connected or
396  *   a default of zero if the socket could not be determined.
397  *   -(-EINVAL)  dev_id value is out of range.
398  */
399 int
400 rte_event_dev_socket_id(uint8_t dev_id);
401 
402 /**
403  * Event device information
404  */
405 struct rte_event_dev_info {
406 	const char *driver_name;	/**< Event driver name */
407 	struct rte_device *dev;	/**< Device information */
408 	uint32_t min_dequeue_timeout_ns;
409 	/**< Minimum supported global dequeue timeout(ns) by this device */
410 	uint32_t max_dequeue_timeout_ns;
411 	/**< Maximum supported global dequeue timeout(ns) by this device */
412 	uint32_t dequeue_timeout_ns;
413 	/**< Configured global dequeue timeout(ns) for this device */
414 	uint8_t max_event_queues;
415 	/**< Maximum event_queues supported by this device */
416 	uint32_t max_event_queue_flows;
417 	/**< Maximum supported flows in an event queue by this device*/
418 	uint8_t max_event_queue_priority_levels;
419 	/**< Maximum number of event queue priority levels by this device.
420 	 * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability
421 	 */
422 	uint8_t max_event_priority_levels;
423 	/**< Maximum number of event priority levels by this device.
424 	 * Valid when the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability
425 	 */
426 	uint8_t max_event_ports;
427 	/**< Maximum number of event ports supported by this device */
428 	uint8_t max_event_port_dequeue_depth;
429 	/**< Maximum number of events that can be dequeued at a time from an
430 	 * event port by this device.
431 	 * A device that does not support bulk dequeue will set this as 1.
432 	 */
433 	uint32_t max_event_port_enqueue_depth;
434 	/**< Maximum number of events that can be enqueued at a time from an
435 	 * event port by this device.
436 	 * A device that does not support bulk enqueue will set this as 1.
437 	 */
438 	uint8_t max_event_port_links;
439 	/**< Maximum number of queues that can be linked to a single event
440 	 * port by this device.
441 	 */
442 	int32_t max_num_events;
443 	/**< A *closed system* event dev has a limit on the number of events it
444 	 * can manage at a time. An *open system* event dev does not have a
445 	 * limit and will specify this as -1.
446 	 */
447 	uint32_t event_dev_cap;
448 	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
449 	uint8_t max_single_link_event_port_queue_pairs;
450 	/**< Maximum number of event ports and queues that are optimized for
451 	 * (and only capable of) single-link configurations supported by this
452 	 * device. These ports and queues are not accounted for in
453 	 * max_event_ports or max_event_queues.
454 	 */
455 	uint8_t max_profiles_per_port;
456 	/**< Maximum number of event queue profiles per event port.
457 	 * A device that doesn't support multiple profiles will set this as 1.
458 	 */
459 };
460 
461 /**
462  * Retrieve the contextual information of an event device.
463  *
464  * @param dev_id
465  *   The identifier of the device.
466  *
467  * @param[out] dev_info
468  *   A pointer to a structure of type *rte_event_dev_info* to be filled with the
469  *   contextual information of the device.
470  *
471  * @return
472  *   - 0: Success, driver updates the contextual information of the event device
473  *   - <0: Error code returned by the driver info get function.
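 *
 * As a purely illustrative sketch, an application might query the device and
 * test a capability flag as follows (error handling omitted for brevity):
 * \code{.c}
 *	struct rte_event_dev_info info;
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE)
 *		printf("%s supports burst enqueue/dequeue\n", info.driver_name);
 * \endcode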
474  */
475 int
476 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
477 
478 /**
479  * The count of ports.
480  */
481 #define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
482 /**
483  * The count of queues.
484  */
485 #define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
486 /**
487  * The status of the device, zero for stopped, non-zero for started.
488  */
489 #define RTE_EVENT_DEV_ATTR_STARTED 2
490 
491 /**
492  * Get an attribute from a device.
493  *
494  * @param dev_id Eventdev id
495  * @param attr_id The attribute ID to retrieve
496  * @param[out] attr_value A pointer that will be filled in with the attribute
497  *             value if successful.
498  *
499  * @return
500  *   - 0: Successfully retrieved attribute value
501  *   - -EINVAL: Invalid device or  *attr_id* provided, or *attr_value* is NULL
502  */
503 int
504 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
505 		       uint32_t *attr_value);
506 
507 
508 /* Event device configuration bitmap flags */
509 #define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
510 /**< Override the global *dequeue_timeout_ns* and use per dequeue timeout in ns.
511  *  @see rte_event_dequeue_timeout_ticks(), rte_event_dequeue_burst()
512  */
513 
514 /** Event device configuration structure */
515 struct rte_event_dev_config {
516 	uint32_t dequeue_timeout_ns;
517 	/**< rte_event_dequeue_burst() timeout on this device.
518 	 * This value should be in the range of *min_dequeue_timeout_ns* and
519 	 * *max_dequeue_timeout_ns* which were previously provided in
520 	 * rte_event_dev_info_get().
521 	 * The value 0 is allowed, in which case the default dequeue timeout is used.
522 	 * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
523 	 */
524 	int32_t nb_events_limit;
525 	/**< In a *closed system* this field is the limit on maximum number of
526 	 * events that can be inflight in the eventdev at a given time. The
527 	 * limit is required to ensure that the finite space in a closed system
528 	 * is not overwhelmed. The value cannot exceed the *max_num_events*
529 	 * as provided by rte_event_dev_info_get().
530 	 * This value should be set to -1 for *open system*.
531 	 */
532 	uint8_t nb_event_queues;
533 	/**< Number of event queues to configure on this device.
534 	 * This value cannot exceed the *max_event_queues* which was previously
535 	 * provided in rte_event_dev_info_get().
536 	 */
537 	uint8_t nb_event_ports;
538 	/**< Number of event ports to configure on this device.
539 	 * This value cannot exceed the *max_event_ports* which was previously
540 	 * provided in rte_event_dev_info_get().
541 	 */
542 	uint32_t nb_event_queue_flows;
543 	/**< Number of flows for any event queue on this device.
544 	 * This value cannot exceed the *max_event_queue_flows* which was previously
545 	 * provided in rte_event_dev_info_get().
546 	 */
547 	uint32_t nb_event_port_dequeue_depth;
548 	/**< Maximum number of events that can be dequeued at a time from an
549 	 * event port by this device.
550 	 * This value cannot exceed the *max_event_port_dequeue_depth*
551 	 * which was previously provided in rte_event_dev_info_get().
552 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
553 	 * @see rte_event_port_setup()
554 	 */
555 	uint32_t nb_event_port_enqueue_depth;
556 	/**< Maximum number of events that can be enqueued at a time from an
557 	 * event port by this device.
558 	 * This value cannot exceed the *max_event_port_enqueue_depth*
559 	 * which was previously provided in rte_event_dev_info_get().
560 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
561 	 * @see rte_event_port_setup()
562 	 */
563 	uint32_t event_dev_cfg;
564 	/**< Event device config flags(RTE_EVENT_DEV_CFG_)*/
565 	uint8_t nb_single_link_event_port_queues;
566 	/**< Number of event ports and queues that will be singly-linked to
567 	 * each other. These are a subset of the overall event ports and
568 	 * queues; this value cannot exceed *nb_event_ports* or
569 	 * *nb_event_queues*. If the device has ports and queues that are
570 	 * optimized for single-link usage, this field is a hint for how many
571 	 * to allocate; otherwise, regular event ports and queues can be used.
572 	 */
573 };
574 
575 /**
576  * Configure an event device.
577  *
578  * This function must be invoked first before any other function in the
579  * API. This function can also be re-invoked when a device is in the
580  * stopped state.
581  *
582  * The caller may use rte_event_dev_info_get() to get the capabilities of the
583  * resources available for this event device.
584  *
585  * @param dev_id
586  *   The identifier of the device to configure.
587  * @param dev_conf
588  *   The event device configuration structure.
589  *
590  * @return
591  *   - 0: Success, device configured.
592  *   - <0: Error code returned by the driver configuration function.
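 *
 * For illustration only, a configuration sized from the reported device limits
 * might be built as in the following sketch (error handling omitted):
 * \code{.c}
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config config = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	config.nb_event_queues = info.max_event_queues;
 *	config.nb_event_ports = info.max_event_ports;
 *	config.nb_events_limit = info.max_num_events;
 *	config.nb_event_queue_flows = info.max_event_queue_flows;
 *	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	config.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *
 *	rte_event_dev_configure(dev_id, &config);
 * \endcode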
593  */
594 int
595 rte_event_dev_configure(uint8_t dev_id,
596 			const struct rte_event_dev_config *dev_conf);
597 
598 /* Event queue specific APIs */
599 
600 /* Event queue configuration bitmap flags */
601 #define RTE_EVENT_QUEUE_CFG_ALL_TYPES          (1ULL << 0)
602 /**< Allow ATOMIC, ORDERED, PARALLEL schedule type enqueue
603  *
604  * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
605  * @see rte_event_enqueue_burst()
606  */
607 #define RTE_EVENT_QUEUE_CFG_SINGLE_LINK        (1ULL << 1)
608 /**< This event queue links only to a single event port.
609  *
610  *  @see rte_event_port_setup(), rte_event_port_link()
611  */
612 
613 /** Event queue configuration structure */
614 struct rte_event_queue_conf {
615 	uint32_t nb_atomic_flows;
616 	/**< The maximum number of active flows this queue can track at any
617 	 * given time. If the queue is configured for atomic scheduling (by
618 	 * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg
619 	 * or RTE_SCHED_TYPE_ATOMIC flag to schedule_type), then the
620 	 * value must be in the range of [1, nb_event_queue_flows], which was
621 	 * previously provided in rte_event_dev_configure().
622 	 */
623 	uint32_t nb_atomic_order_sequences;
624 	/**< The maximum number of outstanding events waiting to be
625 	 * reordered by this queue. In other words, the number of entries in
626 	 * this queue’s reorder buffer. When the number of events in the
627 	 * reorder buffer reaches *nb_atomic_order_sequences*, the
628 	 * scheduler cannot schedule the events from this queue and an invalid
629 	 * event will be returned from dequeue until one or more entries are
630 	 * freed up/released.
631 	 * If the queue is configured for ordered scheduling (by applying the
632 	 * RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg or
633 	 * RTE_SCHED_TYPE_ORDERED flag to schedule_type), then the value must
634 	 * be in the range of [1, nb_event_queue_flows], which was
635 	 * previously supplied to rte_event_dev_configure().
636 	 */
637 	uint32_t event_queue_cfg;
638 	/**< Queue cfg flags(EVENT_QUEUE_CFG_) */
639 	uint8_t schedule_type;
640 	/**< Queue schedule type(RTE_SCHED_TYPE_*).
641 	 * Valid when RTE_EVENT_QUEUE_CFG_ALL_TYPES bit is not set in
642 	 * event_queue_cfg.
643 	 */
644 	uint8_t priority;
645 	/**< Priority for this event queue relative to other event queues.
646 	 * The requested priority should be in the range of
647 	 * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
648 	 * The implementation shall normalize the requested priority to
649 	 * event device supported priority value.
650 	 * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability
651 	 */
652 	uint8_t weight;
653 	/**< Weight of the event queue relative to other event queues.
654 	 * The requested weight should be in the range of
655 	 * [RTE_EVENT_QUEUE_WEIGHT_HIGHEST, RTE_EVENT_QUEUE_WEIGHT_LOWEST].
656 	 * The implementation shall normalize the requested weight to event
657 	 * device supported weight value.
658 	 * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
659 	 */
660 	uint8_t affinity;
661 	/**< Affinity of the event queue relative to other event queues.
662 	 * The requested affinity should be in the range of
663 	 * [RTE_EVENT_QUEUE_AFFINITY_HIGHEST, RTE_EVENT_QUEUE_AFFINITY_LOWEST].
664 	 * The implementation shall normalize the requested affinity to event
665 	 * device supported affinity value.
666 	 * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
667 	 */
668 };
669 
670 /**
671  * Retrieve the default configuration information of an event queue designated
672  * by its *queue_id* from the event driver for an event device.
673  *
674  * This function is intended to be used in conjunction with rte_event_queue_setup()
675  * where the caller needs to set up the queue by overriding a few default values.
676  *
677  * @param dev_id
678  *   The identifier of the device.
679  * @param queue_id
680  *   The index of the event queue to get the configuration information.
681  *   The value must be in the range [0, nb_event_queues - 1]
682  *   previously supplied to rte_event_dev_configure().
683  * @param[out] queue_conf
684  *   The pointer to the default event queue configuration data.
685  * @return
686  *   - 0: Success, driver updates the default event queue configuration data.
687  *   - <0: Error code returned by the driver info get function.
688  *
689  * @see rte_event_queue_setup()
690  */
691 int
692 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
693 				 struct rte_event_queue_conf *queue_conf);
694 
695 /**
696  * Allocate and set up an event queue for an event device.
697  *
698  * @param dev_id
699  *   The identifier of the device.
700  * @param queue_id
701  *   The index of the event queue to setup. The value must be in the range
702  *   [0, nb_event_queues - 1] previously supplied to rte_event_dev_configure().
703  * @param queue_conf
704  *   The pointer to the configuration data to be used for the event queue.
705  *   NULL value is allowed, in which case the default configuration is used.
706  *
707  * @see rte_event_queue_default_conf_get()
708  *
709  * @return
710  *   - 0: Success, event queue correctly set up.
711  *   - <0: event queue configuration failed
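 *
 * Purely as an illustration, a queue could be set up from the default
 * configuration with a single overridden field, as in this sketch (return
 * values not checked):
 * \code{.c}
 *	struct rte_event_queue_conf conf;
 *
 *	rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
 *	conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
 *	rte_event_queue_setup(dev_id, queue_id, &conf);
 * \endcode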
712  */
713 int
714 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
715 		      const struct rte_event_queue_conf *queue_conf);
716 
717 /**
718  * The priority of the queue.
719  */
720 #define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
721 /**
722  * The number of atomic flows configured for the queue.
723  */
724 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
725 /**
726  * The number of atomic order sequences configured for the queue.
727  */
728 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
729 /**
730  * The cfg flags for the queue.
731  */
732 #define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
733 /**
734  * The schedule type of the queue.
735  */
736 #define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
737 /**
738  * The weight of the queue.
739  */
740 #define RTE_EVENT_QUEUE_ATTR_WEIGHT 5
741 /**
742  * Affinity of the queue.
743  */
744 #define RTE_EVENT_QUEUE_ATTR_AFFINITY 6
745 
746 /**
747  * Get an attribute from a queue.
748  *
749  * @param dev_id
750  *   Eventdev id
751  * @param queue_id
752  *   Eventdev queue id
753  * @param attr_id
754  *   The attribute ID to retrieve
755  * @param[out] attr_value
756  *   A pointer that will be filled in with the attribute value if successful
757  *
758  * @return
759  *   - 0: Successfully returned value
760  *   - -EINVAL: invalid device, queue or attr_id provided, or attr_value was
761  *		NULL
762  *   - -EOVERFLOW: returned when attr_id is set to
763  *   RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and event_queue_cfg is set to
764  *   RTE_EVENT_QUEUE_CFG_ALL_TYPES
765  */
766 int
767 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
768 			uint32_t *attr_value);
769 
770 /**
771  * Set an event queue attribute.
772  *
773  * @param dev_id
774  *   Eventdev id
775  * @param queue_id
776  *   Eventdev queue id
777  * @param attr_id
778  *   The attribute ID to set
779  * @param attr_value
780  *   The attribute value to set
781  *
782  * @return
783  *   - 0: Successfully set attribute.
784  *   - -EINVAL: invalid device, queue or attr_id.
785  *   - -ENOTSUP: device does not support setting the event attribute.
786  *   - <0: failed to set event queue attribute
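 *
 * For illustration only, assuming the device reports the
 * RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR capability, the queue weight could be
 * raised at runtime as in this sketch:
 * \code{.c}
 *	int ret = rte_event_queue_attr_set(dev_id, queue_id,
 *			RTE_EVENT_QUEUE_ATTR_WEIGHT,
 *			RTE_EVENT_QUEUE_WEIGHT_HIGHEST);
 *	if (ret < 0)
 *		printf("runtime weight change not applied: %d\n", ret);
 * \endcode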
787  */
788 int
789 rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
790 			 uint64_t attr_value);
791 
792 /* Event port specific APIs */
793 
794 /* Event port configuration bitmap flags */
795 #define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)
796 /**< Configure the port not to release outstanding events in
797  * rte_event_dequeue_burst(). If set, all events received through
798  * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
799  * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
800  * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
801  */
802 #define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)
803 /**< This event port links only to a single event queue.
804  *
805  *  @see rte_event_port_setup(), rte_event_port_link()
806  */
807 #define RTE_EVENT_PORT_CFG_HINT_PRODUCER       (1ULL << 2)
808 /**< Hint that this event port will primarily enqueue events to the system.
809  * A PMD can optimize its internal workings by assuming that this port is
810  * primarily going to enqueue NEW events.
811  *
812  * Note that this flag is only a hint, so PMDs must operate under the
813  * assumption that any port can enqueue an event with any type of op.
814  *
815  *  @see rte_event_port_setup()
816  */
817 #define RTE_EVENT_PORT_CFG_HINT_CONSUMER       (1ULL << 3)
818 /**< Hint that this event port will primarily dequeue events from the system.
819  * A PMD can optimize its internal workings by assuming that this port is
820  * primarily going to consume events, and not enqueue FORWARD or RELEASE
821  * events.
822  *
823  * Note that this flag is only a hint, so PMDs must operate under the
824  * assumption that any port can enqueue an event with any type of op.
825  *
826  *  @see rte_event_port_setup()
827  */
828 #define RTE_EVENT_PORT_CFG_HINT_WORKER         (1ULL << 4)
829 /**< Hint that this event port will primarily pass existing events through.
830  * A PMD can optimize its internal workings by assuming that this port is
831  * primarily going to FORWARD events, and not enqueue NEW or RELEASE events
832  * often.
833  *
834  * Note that this flag is only a hint, so PMDs must operate under the
835  * assumption that any port can enqueue an event with any type of op.
836  *
837  *  @see rte_event_port_setup()
838  */
839 
840 /** Event port configuration structure */
841 struct rte_event_port_conf {
842 	int32_t new_event_threshold;
843 	/**< A backpressure threshold for new event enqueues on this port.
844 	 * Use for *closed system* event dev where event capacity is limited,
845 	 * and cannot exceed the capacity of the event dev.
846 	 * Configuring ports with different thresholds can make higher priority
847 	 * traffic less likely to be backpressured.
848 	 * For example, a port used to inject NIC Rx packets into the event dev
849 	 * can have a lower threshold so as not to overwhelm the device,
850 	 * while ports used for worker pools can have a higher threshold.
851 	 * This value cannot exceed the *nb_events_limit*
852 	 * which was previously supplied to rte_event_dev_configure().
853 	 * This should be set to '-1' for *open system*.
854 	 */
855 	uint16_t dequeue_depth;
856 	/**< Configure number of bulk dequeues for this event port.
857 	 * This value cannot exceed the *nb_event_port_dequeue_depth*
858 	 * which was previously supplied to rte_event_dev_configure().
859 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
860 	 */
861 	uint16_t enqueue_depth;
862 	/**< Configure number of bulk enqueues for this event port.
863 	 * This value cannot exceed the *nb_event_port_enqueue_depth*
864 	 * which was previously supplied to rte_event_dev_configure().
865 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
866 	 */
867 	uint32_t event_port_cfg; /**< Port cfg flags(EVENT_PORT_CFG_) */
868 };
869 
870 /**
871  * Retrieve the default configuration information of an event port designated
872  * by its *port_id* from the event driver for an event device.
873  *
874  * This function is intended to be used in conjunction with rte_event_port_setup()
875  * where the caller needs to set up the port by overriding a few default values.
876  *
877  * @param dev_id
878  *   The identifier of the device.
879  * @param port_id
880  *   The index of the event port to get the configuration information.
881  *   The value must be in the range [0, nb_event_ports - 1]
882  *   previously supplied to rte_event_dev_configure().
883  * @param[out] port_conf
884  *   The pointer to the default event port configuration data
885  * @return
886  *   - 0: Success, driver updates the default event port configuration data.
887  *   - <0: Error code returned by the driver info get function.
888  *
889  * @see rte_event_port_setup()
890  */
891 int
892 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
893 				struct rte_event_port_conf *port_conf);
894 
895 /**
896  * Allocate and set up an event port for an event device.
897  *
898  * @param dev_id
899  *   The identifier of the device.
900  * @param port_id
901  *   The index of the event port to setup. The value must be in the range
902  *   [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
903  * @param port_conf
904  *   The pointer to the configuration data to be used for the event port.
905  *   NULL value is allowed, in which case the default configuration is used.
906  *
907  * @see rte_event_port_default_conf_get()
908  *
909  * @return
910  *   - 0: Success, event port correctly set up.
911  *   - <0: Port configuration failed
912  *   - (-EDQUOT) Quota exceeded (the application tried to link a queue configured
913  *   with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
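 *
 * As a non-normative sketch, a worker port could be set up from the default
 * configuration with an adjusted new event threshold; the value 4096 is an
 * arbitrary placeholder and must not exceed the configured *nb_events_limit*
 * (return values not checked):
 * \code{.c}
 *	struct rte_event_port_conf conf;
 *
 *	rte_event_port_default_conf_get(dev_id, port_id, &conf);
 *	conf.new_event_threshold = 4096;
 *	rte_event_port_setup(dev_id, port_id, &conf);
 * \endcode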
914  */
915 int
916 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
917 		     const struct rte_event_port_conf *port_conf);
918 
919 typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
920 					  struct rte_event event, void *arg);
921 /**< Callback function prototype that can be passed during
922  * rte_event_port_quiesce(), invoked once per released event.
923  */
924 
925 /**
926  * Quiesce any core specific resources consumed by the event port.
927  *
928  * Event ports are generally coupled with lcores, and a given Hardware
929  * implementation might require the PMD to store port specific data in the
930  * lcore.
931  * When the application decides to migrate the event port to another lcore
932  * or tear down the current lcore, it may call `rte_event_port_quiesce`
933  * to make sure that all the data associated with the event port is released
934  * from the lcore; this might also include any prefetched events.
935  * While releasing the event port from the lcore, this function calls the
936  * user-provided flush callback once per event.
937  *
938  * @note Invocation of this API does not affect the existing port configuration.
939  *
940  * @param dev_id
941  *   The identifier of the device.
942  * @param port_id
943  *   The index of the event port to quiesce. The value must be in the range
944  *   [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
945  * @param release_cb
946  *   Callback function invoked once per flushed event.
947  * @param args
948  *   Argument supplied to callback.
949  */
950 void
951 rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
952 		       rte_eventdev_port_flush_t release_cb, void *args);
953 
954 /**
955  * The queue depth of the port on the enqueue side
956  */
957 #define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
958 /**
959  * The queue depth of the port on the dequeue side
960  */
961 #define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
962 /**
963  * The new event threshold of the port
964  */
965 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
966 /**
967  * The implicit release disable attribute of the port
968  */
969 #define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
970 
971 /**
972  * Get an attribute from a port.
973  *
974  * @param dev_id
975  *   Eventdev id
976  * @param port_id
977  *   Eventdev port id
978  * @param attr_id
979  *   The attribute ID to retrieve
980  * @param[out] attr_value
981  *   A pointer that will be filled in with the attribute value if successful
982  *
983  * @return
984  *   - 0: Successfully returned value
985  *   - (-EINVAL) Invalid device, port or attr_id, or attr_value was NULL
986  */
987 int
988 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
989 			uint32_t *attr_value);
990 
991 /**
992  * Start an event device.
993  *
994  * The device start step is the last one and consists of setting the event
995  * queues to start accepting events and scheduling them to event ports.
996  *
997  * On success, all basic functions exported by the API (event enqueue,
998  * event dequeue and so on) can be invoked.
999  *
1000  * @param dev_id
1001  *   Event device identifier
1002  * @return
1003  *   - 0: Success, device started.
1004  *   - -ESTALE : Not all ports of the device are configured
1005  *   - -ENOLINK: Not all queues are linked, which could lead to deadlock.
1006  */
1007 int
1008 rte_event_dev_start(uint8_t dev_id);
1009 
1010 /**
1011  * Stop an event device.
1012  *
1013  * This function causes all queued events to be drained, including those
1014  * residing in event ports. While draining events out of the device, this
1015  * function calls the user-provided flush callback (if one was registered) once
1016  * per event.
1017  *
1018  * The device can be restarted with a call to rte_event_dev_start(). Threads
1019  * that continue to enqueue/dequeue while the device is stopped, or being
1020  * stopped, will result in undefined behavior. This includes event adapters,
1021  * which must be stopped prior to stopping the eventdev.
1022  *
1023  * @param dev_id
1024  *   Event device identifier.
1025  *
1026  * @see rte_event_dev_stop_flush_callback_register()
1027  */
1028 void
1029 rte_event_dev_stop(uint8_t dev_id);
1030 
1031 typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
1032 					  struct rte_event event, void *arg);
1033 /**< Callback function called during rte_event_dev_stop(), invoked once per
1034  * flushed event.
1035  */
1036 
1037 /**
1038  * Registers a callback function to be invoked during rte_event_dev_stop() for
1039  * each flushed event. This function can be used to properly dispose of queued
1040  * events, for example events containing memory pointers.
1041  *
1042  * The callback function is only registered for the calling process. The
1043  * callback function must be registered in every process that can call
1044  * rte_event_dev_stop().
1045  *
1046  * To unregister a callback, call this function with a NULL callback pointer.
1047  *
1048  * @param dev_id
1049  *   The identifier of the device.
1050  * @param callback
1051  *   Callback function invoked once per flushed event.
1052  * @param userdata
1053  *   Argument supplied to callback.
1054  *
1055  * @return
1056  *  - 0 on success.
1057  *  - -EINVAL if *dev_id* is invalid
1058  *
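 * As an illustrative sketch only, a flush callback that frees mbufs carried in
 * CPU-generated events might look as follows. It assumes the application
 * itself enqueued those events with the mbuf field set, which is an
 * application-specific convention:
 * \code{.c}
 *	static void
 *	flush_cb(uint8_t dev_id, struct rte_event event, void *arg)
 *	{
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(arg);
 *		if (event.event_type == RTE_EVENT_TYPE_CPU && event.mbuf != NULL)
 *			rte_pktmbuf_free(event.mbuf);
 *	}
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, flush_cb, NULL);
 * \endcode
 *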
1059  * @see rte_event_dev_stop()
1060  */
1061 int rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1062 					       rte_eventdev_stop_flush_t callback, void *userdata);
1063 
1064 /**
1065  * Close an event device. The device cannot be restarted!
1066  *
1067  * @param dev_id
1068  *   Event device identifier
1069  *
1070  * @return
1071  *  - 0 on successfully closing device
1072  *  - <0 on failure to close device
1073  *  - (-EAGAIN) if device is busy
1074  */
1075 int
1076 rte_event_dev_close(uint8_t dev_id);
1077 
1078 /**
1079  * Event vector structure.
1080  */
1081 struct rte_event_vector {
1082 	uint16_t nb_elem;
1083 	/**< Number of elements valid in this event vector. */
1084 	uint16_t elem_offset : 12;
1085 	/**< Offset into the vector array where valid elements start from. */
1086 	uint16_t rsvd : 3;
1087 	/**< Reserved for future use */
1088 	uint16_t attr_valid : 1;
1089 	/**< Indicates that the below union attributes have valid information.
1090 	 */
1091 	union {
1092 		/* Used by Rx/Tx adapter.
1093 		 * Indicates that all the elements in this vector belong to the
1094 		 * same port and queue pair when originating from Rx adapter,
1095 		 * valid only when event type is ETHDEV_VECTOR or
1096 		 * ETH_RX_ADAPTER_VECTOR.
1097 		 * Can also be used to indicate the Tx adapter the destination
1098 		 * port and queue of the mbufs in the vector
1099 		 */
1100 		struct {
1101 			uint16_t port;
1102 			/* Ethernet device port id. */
1103 			uint16_t queue;
1104 			/* Ethernet device queue id. */
1105 		};
1106 	};
1107 	/**< Union to hold common attributes of the vector array. */
1108 	uint64_t impl_opaque;
1109 
1110 /* empty structures do not have zero size in C++ leading to compilation errors
1111  * with clang about structure having different sizes in C and C++.
1112  * Since these are all zero-sized arrays, we can omit the "union" wrapper for
1113  * C++ builds, removing the warning.
1114  */
1115 #ifndef __cplusplus
1116 	/**< Implementation specific opaque value.
1117 	 * An implementation may use this field to hold implementation specific
1118 	 * value to share between dequeue and enqueue operation.
1119 	 * The application should not modify this field.
1120 	 */
1121 	union {
1122 #endif
1123 		struct rte_mbuf *mbufs[0];
1124 		void *ptrs[0];
1125 		uint64_t u64s[0];
1126 #ifndef __cplusplus
1127 	} __rte_aligned(16);
1128 #endif
1129 	/**< Start of the vector array union. Depending upon the event type the
1130 	 * vector array can be an array of mbufs or pointers or opaque u64
1131 	 * values.
1132 	 */
1133 } __rte_aligned(16);
1134 
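/*
 * Illustrative sketch only: iterating over the mbufs carried by a dequeued
 * vector event. It assumes ev has been dequeued and its event_type indicates
 * an ethdev or Rx adapter vector, so the mbufs[] view of the array is valid.
 *
 * \code{.c}
 *	struct rte_event_vector *vec = ev.vec;
 *	uint16_t i;
 *
 *	for (i = 0; i < vec->nb_elem; i++) {
 *		struct rte_mbuf *m = vec->mbufs[vec->elem_offset + i];
 *		(process m)
 *	}
 * \endcode
 */
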
1135 /* Scheduler type definitions */
1136 #define RTE_SCHED_TYPE_ORDERED          0
1137 /**< Ordered scheduling
1138  *
1139  * Events from an ordered flow of an event queue can be scheduled to multiple
1140  * ports for concurrent processing while maintaining the original event order.
1141  * This scheme enables the user to achieve high single flow throughput by
1142  * avoiding SW synchronization for ordering between ports which are bound to cores.
1143  *
1144  * The source flow ordering from an event queue is maintained when events are
1145  * enqueued to their destination queue within the same ordered flow context.
1146  * An event port holds the context until the application calls
1147  * rte_event_dequeue_burst() from the same port, which implicitly releases
1148  * the context.
1149  * User may allow the scheduler to release the context earlier than that
1150  * by invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE operation.
1151  *
1152  * Events from the source queue appear in their original order when dequeued
1153  * from a destination queue.
1154  * Event ordering is based on the received event(s), but also other
1155  * (newly allocated or stored) events are ordered when enqueued within the same
1156  * ordered context. Events not enqueued (e.g. released or stored) within the
1157  * context are  considered missing from reordering and are skipped at this time
1158  * (but can be ordered again within another context).
1159  *
1160  * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
1161  */
1162 
1163 #define RTE_SCHED_TYPE_ATOMIC           1
1164 /**< Atomic scheduling
1165  *
1166  * Events from an atomic flow of an event queue can be scheduled only to a
1167  * single port at a time. The port is guaranteed to have exclusive (atomic)
1168  * access to the associated flow context, which enables the user to avoid SW
1169  * synchronization. Atomic flows also help to maintain event ordering
1170  * since only one port at a time can process events from a flow of an
1171  * event queue.
1172  *
1173  * The atomic queue synchronization context is dedicated to the port until
1174  * the application calls rte_event_dequeue_burst() from the same port,
1175  * which implicitly releases the context. User may allow the scheduler to
1176  * release the context earlier than that by invoking rte_event_enqueue_burst()
1177  * with RTE_EVENT_OP_RELEASE operation.
1178  *
1179  * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
1180  */
1181 
1182 #define RTE_SCHED_TYPE_PARALLEL         2
1183 /**< Parallel scheduling
1184  *
1185  * The scheduler performs priority scheduling, load balancing, etc. functions
1186  * but does not provide additional event synchronization or ordering.
1187  * It is free to schedule events from a single parallel flow of an event queue
1188  * to multiple event ports for concurrent processing.
1189  * The application is responsible for flow context synchronization and
1190  * event ordering (SW synchronization).
1191  *
1192  * @see rte_event_queue_setup(), rte_event_dequeue_burst()
1193  */
1194 
1195 /* Event types to classify the event source */
1196 #define RTE_EVENT_TYPE_ETHDEV           0x0
1197 /**< The event generated from ethdev subsystem */
1198 #define RTE_EVENT_TYPE_CRYPTODEV        0x1
1199 /**< The event generated from the cryptodev subsystem */
1200 #define RTE_EVENT_TYPE_TIMER		0x2
1201 /**< The event generated from event timer adapter */
1202 #define RTE_EVENT_TYPE_CPU              0x3
1203 /**< The event generated from cpu for pipelining.
1204  * Application may use *sub_event_type* to further classify the event
1205  */
1206 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER   0x4
1207 /**< The event generated from event eth Rx adapter */
1208 #define RTE_EVENT_TYPE_DMADEV           0x5
1209 /**< The event generated from dma subsystem */
1210 #define RTE_EVENT_TYPE_VECTOR           0x8
1211 /**< Indicates that event is a vector.
1212  * All vector event types should be a logical OR of RTE_EVENT_TYPE_VECTOR
1213  * and the base event type. This simplifies the pipeline design as one can
1214  * split processing between vector events and normal events across event types.
1215  * Example:
1216  *	if (ev.event_type & RTE_EVENT_TYPE_VECTOR) {
1217  *		// Classify and handle vector event.
1218  *	} else {
1219  *		// Classify and handle event.
1220  *	}
1221  */
1222 #define RTE_EVENT_TYPE_ETHDEV_VECTOR                                           \
1223 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
1224 /**< The event vector generated from ethdev subsystem */
1225 #define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
1226 /**< The event vector generated from cpu for pipelining. */
1227 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR                                   \
1228 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
1229 /**< The event vector generated from eth Rx adapter. */
1230 #define RTE_EVENT_TYPE_CRYPTODEV_VECTOR                                        \
1231 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
1232 /**< The event vector generated from cryptodev adapter. */
1233 
1234 #define RTE_EVENT_TYPE_MAX              0x10
1235 /**< Maximum number of event types */
1236 
1237 /* Event enqueue operations */
1238 #define RTE_EVENT_OP_NEW                0
1239 /**< The event producers use this operation to inject a new event to the
1240  * event device.
1241  */
1242 #define RTE_EVENT_OP_FORWARD            1
1243 /**< The CPU uses this operation to forward the event to a different event queue,
1244  * or to change to a new application specific flow or schedule type, to enable
1245  * pipelining.
1246  *
1247  * This operation must only be enqueued to the same port that the
1248  * event to be forwarded was dequeued from.
1249  */
1250 #define RTE_EVENT_OP_RELEASE            2
1251 /**< Release the flow context associated with the schedule type.
1252  *
1253  * If the current flow's scheduling type is *RTE_SCHED_TYPE_ATOMIC*
1254  * then this function hints the scheduler that the user has completed critical
1255  * section processing in the current atomic context.
1256  * The scheduler is now allowed to schedule events from the same flow from
1257  * an event queue to another port. However, the context may be still held
1258  * until the next rte_event_dequeue_burst() call, this call allows but does not
1259  * force the scheduler to release the context early.
1260  *
1261  * Early atomic context release may increase parallelism and thus system
1262  * performance, but the user needs to design carefully the split into critical
1263  * vs non-critical sections.
1264  *
1265  * If the current flow's scheduling type is *RTE_SCHED_TYPE_ORDERED*
1266  * then this function hints the scheduler that the user has done all that is
1267  * needed to maintain event order in the current ordered context.
1268  * The scheduler is allowed to release the ordered context of this port and
1269  * avoid reordering any following enqueues.
1270  *
1271  * Early ordered context release may increase parallelism and thus system
1272  * performance.
1273  *
1274  * If the current flow's scheduling type is *RTE_SCHED_TYPE_PARALLEL*
1275  * or no scheduling context is held then this function may be a NOOP,
1276  * depending on the implementation.
1277  *
1278  * This operation must only be enqueued to the same port that the
1279  * event to be released was dequeued from.
1280  */
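
/*
 * Illustrative sketch only: a worker that has fully consumed an event it
 * dequeued can drop it by enqueuing a RELEASE operation on the same port
 * (meaningful when the port was configured with implicit release disabled).
 *
 * \code{.c}
 *	ev.op = RTE_EVENT_OP_RELEASE;
 *	rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 * \endcode
 */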
1281 
1282 /**
1283  * The generic *rte_event* structure to hold the event attributes
1284  * for dequeue and enqueue operation
1285  */
1286 struct rte_event {
1287 	/** WORD0 */
1288 	union {
1289 		uint64_t event;
1290 		/** Event attributes for dequeue or enqueue operation */
1291 		struct {
1292 			uint32_t flow_id:20;
1293 			/**< Targeted flow identifier for the enqueue and
1294 			 * dequeue operation.
1295 			 * The value must be in the range of
1296 			 * [0, nb_event_queue_flows - 1] which was
1297 			 * previously supplied to rte_event_dev_configure().
1298 			 */
1299 			uint32_t sub_event_type:8;
1300 			/**< Sub-event types based on the event source.
1301 			 * @see RTE_EVENT_TYPE_CPU
1302 			 */
1303 			uint32_t event_type:4;
1304 			/**< Event type to classify the event source.
1305 			 * @see RTE_EVENT_TYPE_ETHDEV, (RTE_EVENT_TYPE_*)
1306 			 */
1307 			uint8_t op:2;
1308 			/**< The type of event enqueue operation - new/forward/
1309 			 * etc. This field is not preserved across an instance
1310 			 * and is undefined on dequeue.
1311 			 * @see RTE_EVENT_OP_NEW, (RTE_EVENT_OP_*)
1312 			 */
1313 			uint8_t rsvd:4;
1314 			/**< Reserved for future use */
1315 			uint8_t sched_type:2;
1316 			/**< Scheduler synchronization type (RTE_SCHED_TYPE_*)
1317 			 * associated with flow id on a given event queue
1318 			 * for the enqueue and dequeue operation.
1319 			 */
1320 			uint8_t queue_id;
1321 			/**< Targeted event queue identifier for the enqueue or
1322 			 * dequeue operation.
1323 			 * The value must be in the range of
1324 			 * [0, nb_event_queues - 1] which was previously supplied to
1325 			 * rte_event_dev_configure().
1326 			 */
1327 			uint8_t priority;
1328 			/**< Event priority relative to other events in the
1329 			 * event queue. The requested priority should be in the
1330 			 * range of [RTE_EVENT_DEV_PRIORITY_HIGHEST,
1331 			 * RTE_EVENT_DEV_PRIORITY_LOWEST].
1332 			 * The implementation shall normalize the requested
1333 			 * priority to a supported priority value.
1334 			 * Valid when the device has
1335 			 * RTE_EVENT_DEV_CAP_EVENT_QOS capability.
1336 			 */
1337 			uint8_t impl_opaque;
1338 			/**< Implementation specific opaque value.
1339 			 * An implementation may use this field to hold
1340 			 * implementation specific value to share between
1341 			 * dequeue and enqueue operation.
1342 			 * The application should not modify this field.
1343 			 */
1344 		};
1345 	};
1346 	/** WORD1 */
1347 	union {
1348 		uint64_t u64;
1349 		/**< Opaque 64-bit value */
1350 		void *event_ptr;
1351 		/**< Opaque event pointer */
1352 		struct rte_mbuf *mbuf;
1353 		/**< mbuf pointer if dequeued event is associated with mbuf */
1354 		struct rte_event_vector *vec;
1355 		/**< Event vector pointer. */
1356 	};
1357 };
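
/*
 * Minimal sketch of populating an event for injection with RTE_EVENT_OP_NEW
 * (illustrative only; target_queue, flow and app_object are hypothetical
 * application values):
 *
 *	struct rte_event ev = {
 *		.op = RTE_EVENT_OP_NEW,
 *		.queue_id = target_queue,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.event_type = RTE_EVENT_TYPE_CPU,
 *		.flow_id = flow,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		.event_ptr = app_object,	// WORD1: opaque payload
 *	};
 */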
1358 
1359 /* Ethdev Rx adapter capability bitmap flags */
1360 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT	0x1
1361 /**< This flag is set when the packet transfer mechanism is in HW.
1362  * Ethdev can send packets to the event device using internal event port.
1363  */
1364 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ	0x2
1365 /**< Adapter supports multiple event queues per ethdev. Every ethdev
1366  * Rx queue can be connected to a unique event queue.
1367  */
1368 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
1369 /**< The application can override the adapter generated flow ID in the
1370  * event. This flow ID can be specified when adding an ethdev Rx queue
1371  * to the adapter using the ev.flow_id member.
1372  * @see struct rte_event_eth_rx_adapter_queue_conf::ev
1373  * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
1374  */
1375 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR	0x8
1376 /**< Adapter supports event vectorization per ethdev. */
1377 
1378 /**
1379  * Retrieve the event device's ethdev Rx adapter capabilities for the
1380  * specified ethernet port
1381  *
1382  * @param dev_id
1383  *   The identifier of the device.
1384  *
1385  * @param eth_port_id
1386  *   The identifier of the ethernet device.
1387  *
1388  * @param[out] caps
1389  *   A pointer to memory filled with Rx event adapter capabilities.
1390  *
1391  * @return
1392  *   - 0: Success, driver provides Rx event adapter capabilities for the
1393  *	ethernet device.
1394  *   - <0: Error code returned by the driver function.
1395  */
1396 int
1397 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1398 				uint32_t *caps);
1399 
1400 #define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
1401 /**< This flag is set when the timer mechanism is in HW. */
1402 
1403 #define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC      (1ULL << 1)
1404 /**< This flag is set if periodic mode is supported. */
1405 
1406 /**
1407  * Retrieve the event device's timer adapter capabilities.
1408  *
1409  * @param dev_id
1410  *   The identifier of the device.
1411  *
1412  * @param[out] caps
1413  *   A pointer to memory to be filled with event timer adapter capabilities.
1414  *
1415  * @return
1416  *   - 0: Success, driver provided event timer adapter capabilities.
1417  *   - <0: Error code returned by the driver function.
1418  */
1419 int
1420 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
1421 
1422 /* Crypto adapter capability bitmap flag */
1423 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW   0x1
1424 /**< Flag indicates HW is capable of generating events in
1425  * RTE_EVENT_OP_NEW enqueue operation. Cryptodev will send
1426  * packets to the event device as new events using an internal
1427  * event port.
1428  */
1429 
1430 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD   0x2
1431 /**< Flag indicates HW is capable of generating events in
1432  * RTE_EVENT_OP_FORWARD enqueue operation. Cryptodev will send
1433  * packets to the event device as forwarded event using an
1434  * internal event port.
1435  */
1436 
1437 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND  0x4
1438 /**< Flag indicates HW is capable of mapping crypto queue pair to
1439  * event queue.
1440  */
1441 
1442 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA   0x8
1443 /**< Flag indicates HW/SW supports a mechanism to store and retrieve
1444  * the private data information along with the crypto session.
1445  */
1446 
1447 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR   0x10
1448 /**< Flag indicates HW is capable of aggregating processed
1449  * crypto operations into rte_event_vector.
1450  */
1451 
1452 /**
1453  * Retrieve the event device's crypto adapter capabilities for the
1454  * specified cryptodev device
1455  *
1456  * @param dev_id
1457  *   The identifier of the device.
1458  *
1459  * @param cdev_id
1460  *   The identifier of the cryptodev device.
1461  *
1462  * @param[out] caps
1463  *   A pointer to memory filled with event adapter capabilities.
1464  *   It is expected to be pre-allocated & initialized by caller.
1465  *
1466  * @return
1467  *   - 0: Success, driver provides event adapter capabilities for the
1468  *     cryptodev device.
1469  *   - <0: Error code returned by the driver function.
1470  */
1471 int
1472 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
1473 				  uint32_t *caps);
1474 
1475 /* DMA adapter capability bitmap flag */
1476 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
1477 /**< Flag indicates HW is capable of generating events in
1478  * RTE_EVENT_OP_NEW enqueue operation. DMADEV will send
1479  * packets to the event device as new events using an
1480  * internal event port.
1481  */
1482 
1483 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
1484 /**< Flag indicates HW is capable of generating events in
1485  * RTE_EVENT_OP_FORWARD enqueue operation. DMADEV will send
1486  * packets to the event device as forwarded event using an
1487  * internal event port.
1488  */
1489 
1490 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4
1491 /**< Flag indicates HW is capable of mapping DMA vchan to event queue. */
1492 
1493 /**
1494  * Retrieve the event device's DMA adapter capabilities for the
1495  * specified dmadev device
1496  *
1497  * @param dev_id
1498  *   The identifier of the device.
1499  *
1500  * @param dmadev_id
1501  *   The identifier of the dmadev device.
1502  *
1503  * @param[out] caps
1504  *   A pointer to memory filled with event adapter capabilities.
1505  *   It is expected to be pre-allocated & initialized by caller.
1506  *
1507  * @return
1508  *   - 0: Success, driver provides event adapter capabilities for the
1509  *     dmadev device.
1510  *   - <0: Error code returned by the driver function.
1511  *
1512  */
1513 __rte_experimental
1514 int
1515 rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);
1516 
1517 /* Ethdev Tx adapter capability bitmap flags */
1518 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT	0x1
1519 /**< This flag is set when the PMD supports a packet transmit callback
1520  */
1521 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR	0x2
1522 /**< Indicates that the Tx adapter is capable of handling event vector of
1523  * mbufs.
1524  */
1525 
1526 /**
1527  * Retrieve the event device's eth Tx adapter capabilities
1528  *
1529  * @param dev_id
1530  *   The identifier of the device.
1531  *
1532  * @param eth_port_id
1533  *   The identifier of the ethernet device.
1534  *
1535  * @param[out] caps
1536  *   A pointer to memory filled with eth Tx adapter capabilities.
1537  *
1538  * @return
1539  *   - 0: Success, driver provides eth Tx adapter capabilities.
1540  *   - <0: Error code returned by the driver function.
1541  */
1542 int
1543 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1544 				uint32_t *caps);
1545 
1546 /**
1547  * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
1548  *
1549  * If the device is configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT flag
1550  * then the application can use this function to convert a timeout value in
1551  * nanoseconds to an implementation-specific timeout value supplied in
1552  * rte_event_dequeue_burst().
1553  *
1554  * @param dev_id
1555  *   The identifier of the device.
1556  * @param ns
1557  *   Wait time in nanoseconds
1558  * @param[out] timeout_ticks
1559  *   Value for the *timeout_ticks* parameter in rte_event_dequeue_burst()
1560  *
1561  * @return
1562  *  - 0 on success.
1563  *  - -ENOTSUP if the device doesn't support timeouts
1564  *  - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
1565  *  - other values < 0 on failure.
1566  *
1567  * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
1568  * @see rte_event_dev_configure()
1569  */
1570 int
1571 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1572 					uint64_t *timeout_ticks);
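
/*
 * Usage sketch (illustrative only; assumes the device was configured with
 * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT, and dev_id, port_id and evs[] are
 * hypothetical application values):
 *
 *	uint64_t ticks;
 *
 *	// Convert a 100 microsecond wait into device-specific ticks.
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) == 0)
 *		rte_event_dequeue_burst(dev_id, port_id, evs, RTE_DIM(evs), ticks);
 */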
1573 
1574 /**
1575  * Link multiple source event queues supplied in *queues* to the destination
1576  * event port designated by its *port_id* with associated service priority
1577  * supplied in *priorities* on the event device designated by its *dev_id*.
1578  *
1579  * The link establishment shall enable the event port *port_id* from
1580  * receiving events from the specified event queue(s) supplied in *queues*
1581  *
1582  * An event queue may link to one or more event ports.
1583  * The number of links that can be established from an event queue to an event
1584  * port is implementation defined.
1585  *
1586  * Event queue(s) to event port link establishment can be changed at runtime
1587  * without re-configuring the device to support scaling and to reduce the
1588  * latency of critical work by establishing the link with more event ports
1589  * at runtime.
1590  *
1591  * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
1592  * than or equal to one, this function links the event queues to the default
1593  * profile_id i.e. profile_id 0 of the event port.
1594  *
1595  * @param dev_id
1596  *   The identifier of the device.
1597  *
1598  * @param port_id
1599  *   Event port identifier to select the destination port to link.
1600  *
1601  * @param queues
1602  *   Points to an array of *nb_links* event queues to be linked
1603  *   to the event port.
1604  *   NULL value is allowed, in which case this function links all the configured
1605  *   event queues *nb_event_queues* which were previously supplied to
1606  *   rte_event_dev_configure() to the event port *port_id*.
1607  *
1608  * @param priorities
1609  *   Points to an array of *nb_links* service priorities associated with each
1610  *   event queue link to event port.
1611  *   The priority defines the event port's servicing priority for
1612  *   event queue, which may be ignored by an implementation.
1613  *   The requested priority should be in the range of
1614  *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
1615  *   The implementation shall normalize the requested priority to an
1616  *   implementation-supported priority value.
1617  *   NULL value is allowed, in which case this function links the event queues
1618  *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority.
1619  *
1620  * @param nb_links
1621  *   The number of links to establish. This parameter is ignored if queues is
1622  *   NULL.
1623  *
1624  * @return
1625  * The number of links actually established. The return value can be less than
1626  * the value of the *nb_links* parameter when the implementation has a
1627  * limitation on specific queue to port link establishment, or if invalid
1628  * parameters are specified in *queues*.
1629  * If the return value is less than *nb_links*, the remaining links at the end
1630  * of queues[] are not established, and the caller has to take care of them.
1631  * If the return value is less than *nb_links* then the implementation shall
1632  * update rte_errno accordingly. Possible rte_errno values are:
1633  * (EDQUOT) Quota exceeded (the application tried to link a queue configured
1634  *  with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
1635  * (EINVAL) Invalid parameter
1636  */
1637 int
1638 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
1639 		    const uint8_t queues[], const uint8_t priorities[],
1640 		    uint16_t nb_links);
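
/*
 * Illustrative sketch only (the queue and priority values are hypothetical,
 * and handle_link_error() is a hypothetical application error handler):
 *
 *	const uint8_t queues[] = {0, 1};
 *	const uint8_t priorities[] = {
 *		RTE_EVENT_DEV_PRIORITY_HIGHEST,
 *		RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *	int nb = rte_event_port_link(dev_id, port_id, queues, priorities,
 *				     RTE_DIM(queues));
 *
 *	if (nb < (int)RTE_DIM(queues))
 *		handle_link_error(rte_errno);	// fewer links than requested
 */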
1641 
1642 /**
1643  * Unlink multiple source event queues supplied in *queues* from the destination
1644  * event port designated by its *port_id* on the event device designated
1645  * by its *dev_id*.
1646  *
1647  * The unlink call issues an async request to disable the event port *port_id*
1648  * from receiving events from the specified event queue(s) supplied in *queues*.
1649  * Event queue(s) to event port unlinking can be done at runtime
1650  * without re-configuring the device.
1651  *
1652  * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
1653  * than or equal to one, this function unlinks the event queues from the default
1654  * profile identifier i.e. profile 0 of the event port.
1655  *
1656  * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
1657  *
1658  * @param dev_id
1659  *   The identifier of the device.
1660  *
1661  * @param port_id
1662  *   Event port identifier to select the destination port to unlink.
1663  *
1664  * @param queues
1665  *   Points to an array of *nb_unlinks* event queues to be unlinked
1666  *   from the event port.
1667  *   NULL value is allowed, in which case this function unlinks all the
1668  *   event queue(s) from the event port *port_id*.
1669  *
1670  * @param nb_unlinks
1671  *   The number of unlinks to establish. This parameter is ignored if queues is
1672  *   NULL.
1673  *
1674  * @return
1675  * The number of unlinks successfully requested. The return value can be less
1676  * than the value of the *nb_unlinks* parameter when the implementation has a
1677  * limitation on specific queue to port unlink establishment or
1678  * if invalid parameters are specified.
1679  * If the return value is less than *nb_unlinks*, the remaining queues at the
1680  * end of queues[] are not unlinked, and the caller has to take care of them.
1681  * If the return value is less than *nb_unlinks* then the implementation shall
1682  * update rte_errno accordingly. Possible rte_errno values are:
1683  * (EINVAL) Invalid parameter
1684  */
1685 int
1686 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
1687 		      uint8_t queues[], uint16_t nb_unlinks);
1688 
1689 /**
1690  * Link multiple source event queues supplied in *queues* to the destination
1691  * event port designated by its *port_id* with associated profile identifier
1692  * supplied in *profile_id* with service priorities supplied in *priorities*
1693  * on the event device designated by its *dev_id*.
1694  *
1695  * If *profile_id* is set to 0, the links created by the call ``rte_event_port_link()``
1696  * will be overwritten.
1697  *
1698  * Event ports by default use profile_id 0 unless it is changed using the
1699  * call ``rte_event_port_profile_switch()``.
1700  *
1701  * The link establishment shall enable the event port *port_id* to receive
1702  * events from the specified event queue(s) supplied in *queues*.
1703  *
1704  * An event queue may link to one or more event ports.
1705  * The number of links that can be established from an event queue to an event
1706  * port is implementation defined.
1707  *
1708  * Event queue(s) to event port link establishment can be changed at runtime
1709  * without re-configuring the device to support scaling and to reduce the
1710  * latency of critical work by establishing the link with more event ports
1711  * at runtime.
1712  *
1713  * @param dev_id
1714  *   The identifier of the device.
1715  *
1716  * @param port_id
1717  *   Event port identifier to select the destination port to link.
1718  *
1719  * @param queues
1720  *   Points to an array of *nb_links* event queues to be linked
1721  *   to the event port.
1722  *   NULL value is allowed, in which case this function links all the configured
1723  *   event queues *nb_event_queues* which were previously supplied to
1724  *   rte_event_dev_configure() to the event port *port_id*.
1725  *
1726  * @param priorities
1727  *   Points to an array of *nb_links* service priorities associated with each
1728  *   event queue link to event port.
1729  *   The priority defines the event port's servicing priority for
1730  *   event queue, which may be ignored by an implementation.
1731  *   The requested priority should be in the range of
1732  *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
1733  *   The implementation shall normalize the requested priority to an
1734  *   implementation-supported priority value.
1735  *   NULL value is allowed, in which case this function links the event queues
1736  *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority.
1737  *
1738  * @param nb_links
1739  *   The number of links to establish. This parameter is ignored if queues is
1740  *   NULL.
1741  *
1742  * @param profile_id
1743  *   The profile identifier associated with the links between event queues and
1744  *   event port. Should be less than the max capability reported by
1745  *   ``rte_event_dev_info::max_profiles_per_port``
1746  *
1747  * @return
1748  * The number of links actually established. The return value can be less than
1749  * the value of the *nb_links* parameter when the implementation has a
1750  * limitation on specific queue to port link establishment, or if invalid
1751  * parameters are specified in *queues*.
1752  * If the return value is less than *nb_links*, the remaining links at the end
1753  * of queues[] are not established, and the caller has to take care of them.
1754  * If the return value is less than *nb_links* then the implementation shall
1755  * update rte_errno accordingly. Possible rte_errno values are:
1756  * (EDQUOT) Quota exceeded (the application tried to link a queue configured
1757  *  with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
1758  * (EINVAL) Invalid parameter
1759  *
1760  */
1761 __rte_experimental
1762 int
1763 rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
1764 				 const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);
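
/*
 * Sketch of configuring two link profiles on one port and switching between
 * them (illustrative only; the queue identifiers are hypothetical):
 *
 *	const uint8_t hi_queues[] = {0};
 *	const uint8_t lo_queues[] = {1, 2};
 *
 *	rte_event_port_profile_links_set(dev_id, port_id, hi_queues, NULL, 1, 0);
 *	rte_event_port_profile_links_set(dev_id, port_id, lo_queues, NULL, 2, 1);
 *
 *	// Later, from the thread owning the port, make profile 1 active so that
 *	// subsequent dequeues only schedule from queues 1 and 2.
 *	rte_event_port_profile_switch(dev_id, port_id, 1);
 */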
1765 
1766 /**
1767  * Unlink multiple source event queues supplied in *queues* that belong to profile
1768  * designated by *profile_id* from the destination event port designated by its
1769  * *port_id* on the event device designated by its *dev_id*.
1770  *
1771  * If *profile_id* is set to 0, i.e. the default profile, then this function
1772  * will act as ``rte_event_port_unlink()``.
1773  *
1774  * The unlink call issues an async request to disable the event port *port_id*
1775  * from receiving events from the specified event queue(s) supplied in *queues*.
1776  * Event queue(s) to event port unlinking can be done at runtime
1777  * without re-configuring the device.
1778  *
1779  * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
1780  *
1781  * @param dev_id
1782  *   The identifier of the device.
1783  *
1784  * @param port_id
1785  *   Event port identifier to select the destination port to unlink.
1786  *
1787  * @param queues
1788  *   Points to an array of *nb_unlinks* event queues to be unlinked
1789  *   from the event port.
1790  *   NULL value is allowed, in which case this function unlinks all the
1791  *   event queue(s) from the event port *port_id*.
1792  *
1793  * @param nb_unlinks
1794  *   The number of unlinks to establish. This parameter is ignored if queues is
1795  *   NULL.
1796  *
1797  * @param profile_id
1798  *   The profile identifier associated with the links between event queues and
1799  *   event port. Should be less than the max capability reported by
1800  *   ``rte_event_dev_info::max_profiles_per_port``
1801  *
1802  * @return
1803  * The number of unlinks successfully requested. The return value can be less
1804  * than the value of the *nb_unlinks* parameter when the implementation has a
1805  * limitation on specific queue to port unlink establishment or
1806  * if invalid parameters are specified.
1807  * If the return value is less than *nb_unlinks*, the remaining queues at the
1808  * end of queues[] are not unlinked, and the caller has to take care of them.
1809  * If the return value is less than *nb_unlinks* then the implementation shall
1810  * update rte_errno accordingly. Possible rte_errno values are:
1811  * (EINVAL) Invalid parameter
1812  *
1813  */
1814 __rte_experimental
1815 int
1816 rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
1817 			      uint16_t nb_unlinks, uint8_t profile_id);
1818 
1819 /**
1820  * Returns the number of unlinks in progress.
1821  *
1822  * This function provides the application with a method to detect when an
1823  * unlink has been completed by the implementation.
1824  *
1825  * @see rte_event_port_unlink() to issue unlink requests.
1826  *
1827  * @param dev_id
1828  *   The identifier of the device.
1829  *
1830  * @param port_id
1831  *   Event port identifier to select port to check for unlinks in progress.
1832  *
1833  * @return
1834  * The number of unlinks that are in progress. A return of zero indicates that
1835  * there are no outstanding unlink requests. A positive return value indicates
1836  * the number of unlinks that are in progress, but are not yet complete.
1837  * A negative return value indicates an error, -EINVAL indicates an invalid
1838  * parameter passed for *dev_id* or *port_id*.
1839  */
1840 int
1841 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);
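
/*
 * Sketch of draining all links from a port before reconfiguration
 * (illustrative only; dev_id and port_id are hypothetical values):
 *
 *	// Request that all queues be unlinked from the port, then wait for the
 *	// asynchronous unlinks to complete.
 *	rte_event_port_unlink(dev_id, port_id, NULL, 0);
 *	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *		rte_pause();
 */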
1842 
1843 /**
1844  * Retrieve the list of source event queues and their associated service priorities
1845  * linked to the destination event port designated by its *port_id*
1846  * on the event device designated by its *dev_id*.
1847  *
1848  * @param dev_id
1849  *   The identifier of the device.
1850  *
1851  * @param port_id
1852  *   Event port identifier.
1853  *
1854  * @param[out] queues
1855  *   Points to an array of *queues* for output.
1856  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1857  *   store the event queue(s) linked with event port *port_id*
1858  *
1859  * @param[out] priorities
1860  *   Points to an array of *priorities* for output.
1861  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1862  *   store the service priority associated with each event queue linked
1863  *
1864  * @return
1865  * The number of links established on the event port designated by its
1866  *  *port_id*.
1867  * - <0 on failure.
1868  */
1869 int
1870 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1871 			 uint8_t queues[], uint8_t priorities[]);
1872 
1873 /**
1874  * Retrieve the list of source event queues and their service priorities
1875  * associated with a *profile_id* and linked to the destination event port
1876  * designated by its *port_id* on the event device designated by its *dev_id*.
1877  *
1878  * @param dev_id
1879  *   The identifier of the device.
1880  *
1881  * @param port_id
1882  *   Event port identifier.
1883  *
1884  * @param[out] queues
1885  *   Points to an array of *queues* for output.
1886  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1887  *   store the event queue(s) linked with event port *port_id*
1888  *
1889  * @param[out] priorities
1890  *   Points to an array of *priorities* for output.
1891  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1892  *   store the service priority associated with each event queue linked
1893  *
1894  * @param profile_id
1895  *   The profile identifier associated with the links between event queues and
1896  *   event port. Should be less than the max capability reported by
1897  *   ``rte_event_dev_info::max_profiles_per_port``
1898  *
1899  * @return
1900  * The number of links established on the event port designated by its
1901  *  *port_id*.
1902  * - <0 on failure.
1903  */
1904 __rte_experimental
1905 int
1906 rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
1907 				 uint8_t priorities[], uint8_t profile_id);
1908 
1909 /**
1910  * Retrieve the service ID of the event dev. If the event dev doesn't use
1911  * an rte_service function, this function returns -ESRCH.
1912  *
1913  * @param dev_id
1914  *   The identifier of the device.
1915  *
1916  * @param [out] service_id
1917  *   A pointer to a uint32_t, to be filled in with the service id.
1918  *
1919  * @return
1920  *   - 0: Success
1921  *   - <0: Error code on failure, if the event dev doesn't use a rte_service
1922  *   function, this function returns -ESRCH.
1923  */
1924 int
1925 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
1926 
1927 /**
1928  * Dump internal information about *dev_id* to the FILE* provided in *f*.
1929  *
1930  * @param dev_id
1931  *   The identifier of the device.
1932  *
1933  * @param f
1934  *   A pointer to a file for output
1935  *
1936  * @return
1937  *   - 0: on success
1938  *   - <0: on failure.
1939  */
1940 int
1941 rte_event_dev_dump(uint8_t dev_id, FILE *f);
1942 
1943 /** Maximum name length for extended statistics counters */
1944 #define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
1945 
1946 /**
1947  * Selects the component of the eventdev to retrieve statistics from.
1948  */
1949 enum rte_event_dev_xstats_mode {
1950 	RTE_EVENT_DEV_XSTATS_DEVICE,
1951 	RTE_EVENT_DEV_XSTATS_PORT,
1952 	RTE_EVENT_DEV_XSTATS_QUEUE,
1953 };
1954 
1955 /**
1956  * A name-key lookup element for extended statistics.
1957  *
1958  * This structure is used to map between names and ID numbers
1959  * for extended eventdev statistics.
1960  */
1961 struct rte_event_dev_xstats_name {
1962 	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
1963 };
1964 
1965 /**
1966  * Retrieve names of extended statistics of an event device.
1967  *
1968  * @param dev_id
1969  *   The identifier of the event device.
1970  * @param mode
1971  *   The mode of statistics to retrieve. Choices include the device statistics,
1972  *   port statistics or queue statistics.
1973  * @param queue_port_id
1974  *   Used to specify the port or queue number in queue or port mode, and is
1975  *   ignored in device mode.
1976  * @param[out] xstats_names
1977  *   Block of memory to insert names into. Must be at least size in capacity.
1978  *   If set to NULL, function returns required capacity.
1979  * @param[out] ids
1980  *   Block of memory to insert ids into. Must be at least size in capacity.
1981  *   If set to NULL, function returns required capacity. The id values returned
1982  *   can be passed to *rte_event_dev_xstats_get* to select statistics.
1983  * @param size
1984  *   Capacity of xstats_names (number of names).
1985  * @return
1986  *   - positive value lower or equal to size: success. The return value
1987  *     is the number of entries filled in the stats table.
1988  *   - positive value higher than size: error, the given statistics table
1989  *     is too small. The return value corresponds to the size that should
1990  *     be given to succeed. The entries in the table are not valid and
1991  *     shall not be used by the caller.
1992  *   - negative value on error:
1993  *        -ENODEV for invalid *dev_id*
1994  *        -EINVAL for invalid mode, queue port or id parameters
1995  *        -ENOTSUP if the device doesn't support this function.
1996  */
1997 int
1998 rte_event_dev_xstats_names_get(uint8_t dev_id,
1999 			       enum rte_event_dev_xstats_mode mode,
2000 			       uint8_t queue_port_id,
2001 			       struct rte_event_dev_xstats_name *xstats_names,
2002 			       uint64_t *ids,
2003 			       unsigned int size);
2004 
2005 /**
2006  * Retrieve extended statistics of an event device.
2007  *
2008  * @param dev_id
2009  *   The identifier of the device.
2010  * @param mode
2011  *  The mode of statistics to retrieve. Choices include the device statistics,
2012  *  port statistics or queue statistics.
2013  * @param queue_port_id
2014  *   Used to specify the port or queue number in queue or port mode, and is
2015  *   ignored in device mode.
2016  * @param ids
2017  *   The id numbers of the stats to get. The ids can be obtained from the stat
2018  *   position in the stat list from rte_event_dev_xstats_names_get(), or
2019  *   by using rte_event_dev_xstats_by_name_get().
2020  * @param[out] values
2021  *   The values for each stats request by ID.
2022  * @param n
2023  *   The number of stats requested
2024  * @return
2025  *   - positive value: number of stat entries filled into the values array
2026  *   - negative value on error:
2027  *        -ENODEV for invalid *dev_id*
2028  *        -EINVAL for invalid mode, queue port or id parameters
2029  *        -ENOTSUP if the device doesn't support this function.
2030  */
2031 int
2032 rte_event_dev_xstats_get(uint8_t dev_id,
2033 			 enum rte_event_dev_xstats_mode mode,
2034 			 uint8_t queue_port_id,
2035 			 const uint64_t ids[],
2036 			 uint64_t values[], unsigned int n);
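
/*
 * Sketch of dumping all device-level xstats (illustrative only; error and
 * allocation-failure handling is omitted for brevity):
 *
 *	int i, num = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *	struct rte_event_dev_xstats_name *names = malloc(num * sizeof(*names));
 *	uint64_t *ids = malloc(num * sizeof(*ids));
 *	uint64_t *values = malloc(num * sizeof(*values));
 *
 *	rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				       names, ids, num);
 *	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				 ids, values, num);
 *	for (i = 0; i < num; i++)
 *		printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
 */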
2037 
2038 /**
2039  * Retrieve the value of a single stat by requesting it by name.
2040  *
2041  * @param dev_id
2042  *   The identifier of the device
2043  * @param name
2044  *   The stat name to retrieve
2045  * @param[out] id
2046  *   If non-NULL, the numerical id of the stat will be returned, so that further
2047  *   requests for the stat can be made using rte_event_dev_xstats_get(), which will
2048  *   be faster as it doesn't need to scan a list of names for the stat.
2049  *   If the stat cannot be found, the id returned will be (unsigned)-1.
2050  * @return
2051  *   - positive value or zero: the stat value
2052  *   - negative value: -EINVAL if stat not found, -ENOTSUP if not supported.
2053  */
2054 uint64_t
2055 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
2056 				 uint64_t *id);
2057 
2058 /**
2059  * Reset the values of the xstats of the selected component in the device.
2060  *
2061  * @param dev_id
2062  *   The identifier of the device
2063  * @param mode
2064  *   The mode of the statistics to reset. Choose from device, queue or port.
2065  * @param queue_port_id
2066  *   The queue or port to reset. 0 and positive values select ports and queues,
2067  *   while -1 indicates all ports or queues.
2068  * @param ids
2069  *   Selects specific statistics to be reset. When NULL, all statistics selected
2070  *   by *mode* will be reset. If non-NULL, must point to array of at least
2071  *   *nb_ids* size.
2072  * @param nb_ids
2073  *   The number of ids available from the *ids* array. Ignored when ids is NULL.
2074  * @return
2075  *   - zero: successfully reset the statistics to zero
2076  *   - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported.
2077  */
2078 int
2079 rte_event_dev_xstats_reset(uint8_t dev_id,
2080 			   enum rte_event_dev_xstats_mode mode,
2081 			   int16_t queue_port_id,
2082 			   const uint64_t ids[],
2083 			   uint32_t nb_ids);
2084 
2085 /**
2086  * Trigger the eventdev self test.
2087  *
2088  * @param dev_id
2089  *   The identifier of the device
2090  * @return
2091  *   - 0: Selftest successful
2092  *   - -ENOTSUP if the device doesn't support selftest
2093  *   - other values < 0 on failure.
2094  */
2095 int rte_event_dev_selftest(uint8_t dev_id);
2096 
2097 /**
2098  * Get the memory required per event vector based on the number of elements per
2099  * vector.
2100  * This should be used to create the mempool that holds the event vectors.
2101  *
2102  * @param name
2103  *   The name of the vector pool.
2104  * @param n
2105  *   The number of elements (event vectors) in the pool.
2106  * @param cache_size
2107  *   Size of the per-core object cache. See rte_mempool_create() for
2108  *   details.
2109  * @param nb_elem
2110  *   The number of elements that a single event vector should be able to hold.
2111  * @param socket_id
2112  *   The socket identifier where the memory should be allocated. The
2113  *   value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
2114  *   reserved zone
2115  *
2116  * @return
2117  *   The pointer to the newly allocated mempool, on success. NULL on error
2118  *   with rte_errno set appropriately. Possible rte_errno values include:
2119  *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
2120  *    - E_RTE_SECONDARY - function was called from a secondary process instance
2121  *    - EINVAL - cache size provided is too large, or priv_size is not aligned.
2122  *    - ENOSPC - the maximum number of memzones has already been allocated
2123  *    - EEXIST - a memzone with the same name already exists
2124  *    - ENOMEM - no appropriate memory area found in which to create memzone
2125  *    - ENAMETOOLONG - mempool name requested is too long.
2126  */
2127 struct rte_mempool *
2128 rte_event_vector_pool_create(const char *name, unsigned int n,
2129 			     unsigned int cache_size, uint16_t nb_elem,
2130 			     int socket_id);
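
/*
 * Sketch of creating a pool of event vectors, each able to hold up to 32
 * object pointers (illustrative sizing values only):
 *
 *	struct rte_mempool *vec_pool;
 *
 *	vec_pool = rte_event_vector_pool_create("ev_vec_pool", 16384, 128, 32,
 *						rte_socket_id());
 *	if (vec_pool == NULL)
 *		rte_panic("cannot create event vector pool: %d\n", rte_errno);
 */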
2131 
2132 #include <rte_eventdev_core.h>
2133 
2134 static __rte_always_inline uint16_t
2135 __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2136 			  const struct rte_event ev[], uint16_t nb_events,
2137 			  const event_enqueue_burst_t fn)
2138 {
2139 	const struct rte_event_fp_ops *fp_ops;
2140 	void *port;
2141 
2142 	fp_ops = &rte_event_fp_ops[dev_id];
2143 	port = fp_ops->data[port_id];
2144 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2145 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2146 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2147 		rte_errno = EINVAL;
2148 		return 0;
2149 	}
2150 
2151 	if (port == NULL) {
2152 		rte_errno = EINVAL;
2153 		return 0;
2154 	}
2155 #endif
2156 	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
2157 	/*
2158 	 * Allow zero-cost invocation of the non-burst routine when the
2159 	 * application passes a constant nb_events of one.
2160 	 */
2161 	if (nb_events == 1)
2162 		return (fp_ops->enqueue)(port, ev);
2163 	else
2164 		return fn(port, ev, nb_events);
2165 }
2166 
2167 /**
2168  * Enqueue a burst of event objects or an event object supplied in *rte_event*
2169  * structure on an event device designated by its *dev_id* through the event
2170  * port specified by *port_id*. Each event object specifies the event queue on
2171  * which it will be enqueued.
2172  *
2173  * The *nb_events* parameter is the number of event objects to enqueue which are
2174  * supplied in the *ev* array of *rte_event* structure.
2175  *
2176  * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
2177  * enqueued to the same port that their associated events were dequeued from.
2178  *
2179  * The rte_event_enqueue_burst() function returns the number of
2180  * events objects it actually enqueued. A return value equal to *nb_events*
2181  * means that all event objects have been enqueued.
2182  *
2183  * @param dev_id
2184  *   The identifier of the device.
2185  * @param port_id
2186  *   The identifier of the event port.
2187  * @param ev
2188  *   Points to an array of *nb_events* objects of type *rte_event* structure
2189  *   which contain the event object enqueue operations to be processed.
2190  * @param nb_events
2191  *   The number of event objects to enqueue, typically number of
2192  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2193  *   available for this port.
2194  *
2195  * @return
2196  *   The number of event objects actually enqueued on the event device. The
2197  *   return value can be less than the value of the *nb_events* parameter when
2198  *   the event devices queue is full or if invalid parameters are specified in a
2199  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2200  *   events at the end of ev[] are not consumed and the caller has to take care
2201  *   of them, and rte_errno is set accordingly. Possible errno values include:
2202  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2203  *              ID is invalid, or an event's sched type doesn't match the
2204  *              capabilities of the destination queue.
2205  *   - ENOSPC   The event port was backpressured and unable to enqueue
2206  *              one or more events. This error code is only applicable to
2207  *              closed systems.
2208  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2209  */
2210 static inline uint16_t
2211 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2212 			const struct rte_event ev[], uint16_t nb_events)
2213 {
2214 	const struct rte_event_fp_ops *fp_ops;
2215 
2216 	fp_ops = &rte_event_fp_ops[dev_id];
2217 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2218 					 fp_ops->enqueue_burst);
2219 }
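
/*
 * Usage sketch: enqueue a burst and retry any events the device could not
 * accept due to backpressure (illustrative only; ev[] preparation and the
 * nb_ev count are hypothetical application details):
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < nb_ev) {
 *		sent += rte_event_enqueue_burst(dev_id, port_id,
 *						ev + sent, nb_ev - sent);
 *		if (sent < nb_ev && rte_errno != ENOSPC)
 *			break;	// invalid event; do not spin forever
 *	}
 */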
2220 
2221 /**
2222  * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_NEW* on
2223  * an event device designated by its *dev_id* through the event port specified
2224  * by *port_id*.
2225  *
2226  * Provides the same functionality as rte_event_enqueue_burst(), except that
2227  * the application can use this API when all the objects in the burst contain
2228  * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
2229  * function can provide an additional hint to the PMD and optimize if possible.
2230  *
2231  * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
2232  * has an event object of operation type != RTE_EVENT_OP_NEW.
2233  *
2234  * @param dev_id
2235  *   The identifier of the device.
2236  * @param port_id
2237  *   The identifier of the event port.
2238  * @param ev
2239  *   Points to an array of *nb_events* objects of type *rte_event* structure
2240  *   which contain the event object enqueue operations to be processed.
2241  * @param nb_events
2242  *   The number of event objects to enqueue, typically number of
2243  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2244  *   available for this port.
2245  *
2246  * @return
2247  *   The number of event objects actually enqueued on the event device. The
2248  *   return value can be less than the value of the *nb_events* parameter when
2249  *   the event devices queue is full or if invalid parameters are specified in a
2250  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2251  *   events at the end of ev[] are not consumed and the caller has to take care
2252  *   of them, and rte_errno is set accordingly. Possible errno values include:
2253  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2254  *              ID is invalid, or an event's sched type doesn't match the
2255  *              capabilities of the destination queue.
2256  *   - ENOSPC   The event port was backpressured and unable to enqueue
2257  *              one or more events. This error code is only applicable to
2258  *              closed systems.
2259  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2260  * @see rte_event_enqueue_burst()
2261  */
2262 static inline uint16_t
2263 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
2264 			    const struct rte_event ev[], uint16_t nb_events)
2265 {
2266 	const struct rte_event_fp_ops *fp_ops;
2267 
2268 	fp_ops = &rte_event_fp_ops[dev_id];
2269 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2270 					 fp_ops->enqueue_new_burst);
2271 }
2272 
2273 /**
2274  * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_FORWARD*
2275  * on an event device designated by its *dev_id* through the event port
2276  * specified by *port_id*.
2277  *
2278  * Provides the same functionality as rte_event_enqueue_burst(), except that
2279  * the application can use this API when all the objects in the burst contain
2280  * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
2281  * function can provide an additional hint to the PMD and optimize if possible.
2282  *
2283  * The rte_event_enqueue_forward_burst() result is undefined if the enqueue burst
2284  * has an event object of operation type != RTE_EVENT_OP_FORWARD.
2285  *
2286  * @param dev_id
2287  *   The identifier of the device.
2288  * @param port_id
2289  *   The identifier of the event port.
2290  * @param ev
2291  *   Points to an array of *nb_events* objects of type *rte_event* structure
2292  *   which contain the event object enqueue operations to be processed.
2293  * @param nb_events
2294  *   The number of event objects to enqueue, typically number of
2295  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2296  *   available for this port.
2297  *
2298  * @return
2299  *   The number of event objects actually enqueued on the event device. The
2300  *   return value can be less than the value of the *nb_events* parameter when
2301  *   the event devices queue is full or if invalid parameters are specified in a
2302  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2303  *   events at the end of ev[] are not consumed and the caller has to take care
2304  *   of them, and rte_errno is set accordingly. Possible errno values include:
2305  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2306  *              ID is invalid, or an event's sched type doesn't match the
2307  *              capabilities of the destination queue.
2308  *   - ENOSPC   The event port was backpressured and unable to enqueue
2309  *              one or more events. This error code is only applicable to
2310  *              closed systems.
2311  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2312  * @see rte_event_enqueue_burst()
2313  */
2314 static inline uint16_t
2315 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
2316 				const struct rte_event ev[], uint16_t nb_events)
2317 {
2318 	const struct rte_event_fp_ops *fp_ops;
2319 
2320 	fp_ops = &rte_event_fp_ops[dev_id];
2321 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2322 					 fp_ops->enqueue_forward_burst);
2323 }
2324 
2325 /**
2326  * Dequeue a burst of event objects or an event object from the event port
2327  * designated by its *event_port_id*, on an event device designated
2328  * by its *dev_id*.
2329  *
2330  * rte_event_dequeue_burst() does not dictate the specifics of scheduling
2331  * algorithm as each eventdev driver may have different criteria to schedule
2332  * an event. However, in general, from an application perspective the scheduler
2333  * may use the following scheme to dispatch an event to the port.
2334  *
2335  * 1) Selection of event queue based on
2336  *   a) The list of event queues linked to the event port.
2337  *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
2338  *   queue selection from the list is based on event queue priority relative to
2339  *   other event queues, supplied as *priority* in rte_event_queue_setup()
2340  *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
2341  *   queue selection from the list is based on event priority supplied as
2342  *   *priority* in rte_event_enqueue_burst()
2343  * 2) Selection of event
2344  *   a) The number of flows available in the selected event queue.
2345  *   b) The schedule type associated with the event
2346  *
2347  * The *nb_events* parameter is the maximum number of event objects to dequeue
2348  * which are returned in the *ev* array of *rte_event* structure.
2349  *
2350  * The rte_event_dequeue_burst() function returns the number of event objects
2351  * it actually dequeued. A return value equal to *nb_events* means that all
2352  * event objects have been dequeued.
2353  *
2354  * The number of events dequeued is the number of scheduler contexts held by
2355  * this port. These contexts are automatically released in the next
2356  * rte_event_dequeue_burst() invocation if the port supports implicit
2357  * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
2358  * operation can be used to release the contexts early.
2359  *
2360  * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
2361  * enqueued to the same port that their associated events were dequeued from.
2362  *
2363  * @param dev_id
2364  *   The identifier of the device.
2365  * @param port_id
2366  *   The identifier of the event port.
2367  * @param[out] ev
2368  *   Points to an array of *nb_events* objects of type *rte_event* structure
2369  *   for output to be populated with the dequeued event objects.
2370  * @param nb_events
2371  *   The maximum number of event objects to dequeue, typically number of
2372  *   rte_event_port_dequeue_depth() available for this port.
2373  *
2374  * @param timeout_ticks
2375  *   - 0 no-wait, returns immediately if there is no event.
2376  *   - >0 wait for the event. If the device is configured with
2377  *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
2378  *   at least one event is available or *timeout_ticks* time has elapsed.
2379  *   If the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
2380  *   then this function will wait until an event is available or for the
2381  *   *dequeue_timeout_ns* ns which was previously supplied to
2382  *   rte_event_dev_configure().
2383  *
2384  * @return
2385  * The number of event objects actually dequeued from the port. The return
2386  * value can be less than the value of the *nb_events* parameter when the
2387  * event port's queue is not full.
2388  *
2389  * @see rte_event_port_dequeue_depth()
2390  */
2391 static inline uint16_t
2392 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
2393 			uint16_t nb_events, uint64_t timeout_ticks)
2394 {
2395 	const struct rte_event_fp_ops *fp_ops;
2396 	void *port;
2397 
2398 	fp_ops = &rte_event_fp_ops[dev_id];
2399 	port = fp_ops->data[port_id];
2400 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2401 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2402 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2403 		rte_errno = EINVAL;
2404 		return 0;
2405 	}
2406 
2407 	if (port == NULL) {
2408 		rte_errno = EINVAL;
2409 		return 0;
2410 	}
2411 #endif
2412 	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
2413 	/*
2414 	 * Allow zero-cost invocation of the non-burst routine when the
2415 	 * application passes a constant nb_events of one.
2416 	 */
2417 	if (nb_events == 1)
2418 		return (fp_ops->dequeue)(port, ev, timeout_ticks);
2419 	else
2420 		return (fp_ops->dequeue_burst)(port, ev, nb_events,
2421 					       timeout_ticks);
2422 }
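
/*
 * Sketch of a simple worker loop that dequeues on a port, processes each event
 * and forwards it to the next stage (illustrative only; done, timeout_ticks,
 * next_queue and process_event() are hypothetical application details, and the
 * enqueue retry logic shown for rte_event_enqueue_burst() is omitted):
 *
 *	struct rte_event evs[32];
 *
 *	while (!done) {
 *		uint16_t i, nb;
 *
 *		nb = rte_event_dequeue_burst(dev_id, port_id, evs,
 *					     RTE_DIM(evs), timeout_ticks);
 *		for (i = 0; i < nb; i++) {
 *			process_event(&evs[i]);
 *			evs[i].op = RTE_EVENT_OP_FORWARD;
 *			evs[i].queue_id = next_queue;
 *		}
 *		if (nb)
 *			rte_event_enqueue_burst(dev_id, port_id, evs, nb);
 *	}
 */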
2423 
2424 #define RTE_EVENT_DEV_MAINT_OP_FLUSH          (1 << 0)
2425 /**< Force an immediate flush of any buffered events in the port,
2426  * potentially at the cost of additional overhead.
2427  *
2428  * @see rte_event_maintain()
2429  */
2430 
2431 /**
2432  * Maintain an event device.
2433  *
2434  * This function is only relevant for event devices which do not have
2435  * the @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE flag set. Such devices
2436  * require an application thread using a particular port to
2437  * periodically call rte_event_maintain() on that port during periods
2438  * which it is neither attempting to enqueue events to nor dequeue
2439  * events from the port. rte_event_maintain() is a low-overhead
2440  * function and should be called at a high rate (e.g., in the
2441  * application's poll loop).
2442  *
2443  * No port may be left unmaintained.
2444  *
2445  * At the application thread's convenience, rte_event_maintain() may
2446  * (but is not required to) be called even during periods when enqueue
2447  * or dequeue functions are being called, at the cost of a slight
2448  * increase in overhead.
2449  *
2450  * rte_event_maintain() may be called on event devices which have set
2451  * @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, in which case it is a
2452  * no-operation.
2453  *
2454  * @param dev_id
2455  *   The identifier of the device.
2456  * @param port_id
2457  *   The identifier of the event port.
2458  * @param op
2459  *   0, or @ref RTE_EVENT_DEV_MAINT_OP_FLUSH.
2460  * @return
2461  *  - 0 on success.
2462  *  - -EINVAL if *dev_id*,  *port_id*, or *op* is invalid.
2463  *
2464  * @see RTE_EVENT_DEV_CAP_MAINTENANCE_FREE
2465  */
2466 static inline int
2467 rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
2468 {
2469 	const struct rte_event_fp_ops *fp_ops;
2470 	void *port;
2471 
2472 	fp_ops = &rte_event_fp_ops[dev_id];
2473 	port = fp_ops->data[port_id];
2474 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2475 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2476 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
2477 		return -EINVAL;
2478 
2479 	if (port == NULL)
2480 		return -EINVAL;
2481 
2482 	if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
2483 		return -EINVAL;
2484 #endif
2485 	rte_eventdev_trace_maintain(dev_id, port_id, op);
2486 
2487 	if (fp_ops->maintain != NULL)
2488 		fp_ops->maintain(port, op);
2489 
2490 	return 0;
2491 }
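
/*
 * Sketch of maintaining a port during an idle period (illustrative only; the
 * device is assumed to lack RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, and
 * work_available() is a hypothetical application predicate):
 *
 *	while (!work_available()) {
 *		// Keep the otherwise-idle port serviced so the scheduler can
 *		// make forward progress on this device.
 *		rte_event_maintain(dev_id, port_id, 0);
 *		rte_pause();
 *	}
 */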
2492 
2493 /**
2494  * Change the active profile on an event port.
2495  *
2496  * This function is used to change the current active profile on an event port
2497  * when multiple link profiles are configured on an event port through the
2498  * function call ``rte_event_port_profile_links_set``.
2499  *
2500  * On the subsequent ``rte_event_dequeue_burst`` call, only the event queues
2501  * that were associated with the newly active profile will participate in
2502  * scheduling.
2503  *
2504  * @param dev_id
2505  *   The identifier of the device.
2506  * @param port_id
2507  *   The identifier of the event port.
2508  * @param profile_id
2509  *   The identifier of the profile.
2510  * @return
2511  *  - 0 on success.
2512  *  - -EINVAL if *dev_id*,  *port_id*, or *profile_id* is invalid.
2513  */
2514 static inline uint8_t
2515 rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
2516 {
2517 	const struct rte_event_fp_ops *fp_ops;
2518 	void *port;
2519 
2520 	fp_ops = &rte_event_fp_ops[dev_id];
2521 	port = fp_ops->data[port_id];
2522 
2523 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2524 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2525 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
2526 		return -EINVAL;
2527 
2528 	if (port == NULL)
2529 		return -EINVAL;
2530 
2531 	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
2532 		return -EINVAL;
2533 #endif
2534 	rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);
2535 
2536 	return fp_ops->profile_switch(port, profile_id);
2537 }
2538 
2539 #ifdef __cplusplus
2540 }
2541 #endif
2542 
2543 #endif /* _RTE_EVENTDEV_H_ */
2544