/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc.
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright 2016 NXP
 * All rights reserved.
 */

#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_

/**
 * @file
 *
 * RTE Event Device API
 *
 * In a polling model, lcores poll ethdev ports and associated Rx queues
 * directly to look for packets. In an event driven model, by contrast, lcores
 * call a scheduler that selects packets for them based on programmer
 * specified criteria. The eventdev library adds support for the event driven
 * programming model, which offers applications automatic multicore scaling,
 * dynamic load balancing, pipelining, packet ingress order maintenance and
 * synchronization services to simplify application packet processing.
 *
 * The Event Device API is composed of two parts:
 *
 * - The application-oriented Event API that includes functions to set up
 *   an event device (configure it, set up its queues and ports, and start it),
 *   to establish the links between queues and ports, to receive events, and so on.
 *
 * - The driver-oriented Event API that exports a function allowing
 *   an event Poll Mode Driver (PMD) to register itself as
 *   an event device driver.
 *
 * Event device components:
 *
 *                     +-----------------+
 *                     | +-------------+ |
 *        +-------+    | |    flow 0   | |
 *        |Packet |    | +-------------+ |
 *        |event  |    | +-------------+ |
 *        |       |    | |    flow 1   | |port_link(port0, queue0)
 *        +-------+    | +-------------+ |     |     +--------+
 *        +-------+    | +-------------+ o-----v-----o        |dequeue +------+
 *        |Crypto |    | |    flow n   | |           | event  +------->|Core 0|
 *        |work   |    | +-------------+ o----+      | port 0 |        |      |
 *        |done ev|    |  event queue 0  |    |      +--------+        +------+
 *        +-------+    +-----------------+    |
 *        +-------+                           |
 *        |Timer  |    +-----------------+    |      +--------+
 *        |expiry |    | +-------------+ |    +------o        |dequeue +------+
 *        |event  |    | |    flow 0   | o-----------o event  +------->|Core 1|
 *        +-------+    | +-------------+ |      +----o port 1 |        |      |
 *       Event enqueue | +-------------+ |      |    +--------+        +------+
 *     o-------------> | |    flow 1   | |      |
 *        enqueue(     | +-------------+ |      |
 *        queue_id,    |                 |      |    +--------+        +------+
 *        flow_id,     | +-------------+ |      |    |        |dequeue |Core 2|
 *        sched_type,  | |    flow n   | o-----------o event  +------->|      |
 *        event_type,  | +-------------+ |      |    | port 2 |        +------+
 *        subev_type,  |  event queue 1  |      |    +--------+
 *        event)       +-----------------+      |    +--------+
 *                                              |    |        |dequeue +------+
 *        +-------+    +-----------------+      |    | event  +------->|Core n|
 *        |Core   |    | +-------------+ o-----------o port n |        |      |
 *        |(SW)   |    | |    flow 0   | |      |    +--------+        +--+---+
 *        |event  |    | +-------------+ |      |                         |
 *        +-------+    | +-------------+ |      |                         |
 *            ^        | |    flow 1   | |      |                         |
 *            |        | +-------------+ o------+                         |
 *            |        | +-------------+ |                                |
 *            |        | |    flow n   | |                                |
 *            |        | +-------------+ |                                |
 *            |        |  event queue n  |                                |
 *            |        +-----------------+                                |
 *            |                                                           |
 *            +-----------------------------------------------------------+
 *
 * Event device: A hardware or software-based event scheduler.
 *
 * Event: A unit of scheduling that encapsulates a packet or other datatype,
 * such as an SW-generated event from the CPU, a crypto work completion
 * notification, a timer expiry event notification, etc., as well as metadata.
 * The metadata includes flow ID, scheduling type, event priority, event_type,
 * sub_event_type, etc.
 *
 * Event queue: A queue containing events that are scheduled by the event dev.
 * An event queue contains events of different flows associated with scheduling
 * types, such as atomic, ordered, or parallel.
 *
 * Event port: An application's interface into the event dev for enqueue and
 * dequeue operations. Each event port can be linked with one or more
 * event queues for dequeue operations.
 *
 * By default, all the functions of the Event Device API exported by a PMD
 * are lock-free functions which assume they are not invoked in parallel on
 * different logical cores to work on the same target object. For instance,
 * the dequeue function of a PMD cannot be invoked in parallel on two logical
 * cores to operate on the same event port. Of course, this function
 * can be invoked in parallel by different logical cores on different ports.
 * It is the responsibility of the upper level application to enforce this rule.
 *
 * In all functions of the Event API, the Event device is
 * designated by an integer >= 0 named the device identifier *dev_id*.
 *
 * At the Event driver level, Event devices are represented by a generic
 * data structure of type *rte_event_dev*.
 *
 * Event devices are dynamically registered during the PCI/SoC device probing
 * phase performed at EAL initialization time.
 * When an Event device is being probed, an *rte_event_dev* structure and
 * a new device identifier are allocated for that device. Then, the
 * event_dev_init() function supplied by the Event driver matching the probed
 * device is invoked to properly initialize the device.
 *
 * The role of the device init function is to reset the hardware or
 * software event driver implementation.
 *
 * If the device init operation is successful, the correspondence between
 * the device identifier assigned to the new device and its associated
 * *rte_event_dev* structure is effectively registered.
 * Otherwise, both the *rte_event_dev* structure and the device identifier are
 * freed.
 *
 * The functions exported by the application Event API to set up a device
 * designated by its device identifier must be invoked in the following order,
 * as the sketch below illustrates:
 *     - rte_event_dev_configure()
 *     - rte_event_queue_setup()
 *     - rte_event_port_setup()
 *     - rte_event_port_link()
 *     - rte_event_dev_start()
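 *
 * For example, a minimal single-queue, single-port setup might look like the
 * following sketch (device 0 and the chosen values are illustrative
 * assumptions; error handling is omitted for brevity):
 *
 * \code{.c}
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config dev_conf = {0};
 *	uint8_t queue_id = 0, port_id = 0;
 *
 *	rte_event_dev_info_get(0, &info);
 *	dev_conf.nb_event_queues = 1;
 *	dev_conf.nb_event_ports = 1;
 *	dev_conf.nb_events_limit = info.max_num_events;
 *	dev_conf.nb_event_queue_flows = info.max_event_queue_flows;
 *	dev_conf.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	dev_conf.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	dev_conf.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	rte_event_dev_configure(0, &dev_conf);
 *	rte_event_queue_setup(0, queue_id, NULL); // NULL: default queue config
 *	rte_event_port_setup(0, port_id, NULL);   // NULL: default port config
 *	rte_event_port_link(0, port_id, &queue_id, NULL, 1);
 *	rte_event_dev_start(0);
 * \endcode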
 *
 * Then, the application can invoke, in any order, the functions
 * exported by the Event API to schedule events, dequeue events, enqueue events,
 * establish or change event queue to event port links, and so on.
 *
 * An application may use rte_event_[queue/port]_default_conf_get() to get the
 * default configuration to set up an event queue or event port by
 * overriding a few default values.
 *
 * If the application wants to change the configuration (i.e. call
 * rte_event_dev_configure(), rte_event_queue_setup(), or
 * rte_event_port_setup()), it must call rte_event_dev_stop() first to stop the
 * device and then do the reconfiguration before calling rte_event_dev_start()
 * again. The schedule, enqueue and dequeue functions should not be invoked
 * when the device is stopped.
 *
 * Finally, an application can close an Event device by invoking the
 * rte_event_dev_close() function.
 *
 * Each function of the application Event API invokes a specific function
 * of the PMD that controls the target device designated by its device
 * identifier.
 *
 * For this purpose, all device-specific functions of an Event driver are
 * supplied through a set of pointers contained in a generic structure of type
 * *event_dev_ops*.
 * The address of the *event_dev_ops* structure is stored in the *rte_event_dev*
 * structure by the device init function of the Event driver, which is
 * invoked during the PCI/SoC device probing phase, as explained earlier.
 *
 * In other words, each function of the Event API simply retrieves the
 * *rte_event_dev* structure associated with the device identifier and
 * performs an indirect invocation of the corresponding driver function
 * supplied in the *event_dev_ops* structure of the *rte_event_dev* structure.
 *
 * For performance reasons, the addresses of the fast-path functions of the
 * Event driver are not contained in the *event_dev_ops* structure.
 * Instead, they are directly stored at the beginning of the *rte_event_dev*
 * structure to avoid an extra indirect memory access during their invocation.
 *
 * RTE event device drivers do not use interrupts for enqueue or dequeue
 * operations. Instead, Event drivers export Poll-Mode enqueue and dequeue
 * functions to applications.
 *
 * Events are injected into the event device through the *enqueue* operation by
 * event producers in the system. The typical event producers are the ethdev
 * subsystem for generating packet events, the CPU (SW) for generating events
 * based on different stages of application processing, the cryptodev for
 * generating crypto work completion notifications, etc.
 *
 * The *dequeue* operation gets one or more events from the event ports.
 * The application processes the events and sends them to a downstream event
 * queue through rte_event_enqueue_burst() if it is an intermediate stage of
 * event processing; on the final stage, the application may use the Tx adapter
 * API to maintain the ingress order and then send the packet/event on the wire.
 *
 * The point at which events are scheduled to ports depends on the device.
 * For hardware devices, scheduling occurs asynchronously without any software
 * intervention. Software schedulers can either be distributed
 * (each worker thread schedules events to its own port) or centralized
 * (a dedicated thread schedules to all ports). Distributed software schedulers
 * perform the scheduling in rte_event_dequeue_burst(), whereas centralized
 * scheduler logic needs a dedicated service core for scheduling.
 * If the RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag is not set, the
 * device is centralized and thus needs a dedicated scheduling
 * thread that repeatedly calls the software specific scheduling function.
 *
 * An event driven worker thread has the following typical workflow on fastpath:
 * \code{.c}
 *	while (1) {
 *		rte_event_dequeue_burst(...);
 *		(event processing)
 *		rte_event_enqueue_burst(...);
 *	}
 * \endcode
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "rte_eventdev_trace_fp.h"

struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
struct rte_event;

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS           (1ULL << 0)
/**< Event scheduling prioritization is based on the priority and weight
 * associated with each event queue. Events from a queue with the highest
 * priority are scheduled first. If the queues are of the same priority, the
 * weights of the queues are considered to select a queue in a weighted round
 * robin fashion.
 * Subsequent dequeue calls from an event port could see events from the same
 * event queue, if the queue is configured with an affinity count. The affinity
 * count is the number of subsequent dequeue calls, in which an event port
 * should use the same event queue if the queue is non-empty.
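 *
 * A sketch of how an application might test for this capability before
 * relying on queue prioritization (illustrative only; dev_id is assumed to
 * be a valid device identifier):
 *
 * \code{.c}
 *	struct rte_event_dev_info info;
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS) {
 *		// Queue priority, weight and affinity attributes are honored.
 *	}
 * \endcode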
 *
 *  @see rte_event_queue_setup(), rte_event_queue_attr_set()
 */
#define RTE_EVENT_DEV_CAP_EVENT_QOS           (1ULL << 1)
/**< Event scheduling prioritization is based on the priority associated with
 *  each event. The priority of each event is supplied in the *rte_event*
 *  structure on each enqueue operation.
 *
 *  @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED   (1ULL << 2)
/**< Event device operates in distributed scheduling mode.
 * In distributed scheduling mode, event scheduling happens in HW or
 * rte_event_dequeue_burst() or the combination of these two.
 * If the flag is not set, then the eventdev is centralized and thus needs a
 * dedicated service core that acts as a scheduling thread.
 *
 * @see rte_event_dequeue_burst()
 */
#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES     (1ULL << 3)
/**< Event device is capable of enqueuing events of any type to any queue.
 * If this capability is not set, the queue only supports events of the
 * *RTE_SCHED_TYPE_* type that it was created with.
 *
 * @see RTE_SCHED_TYPE_* values
 */
#define RTE_EVENT_DEV_CAP_BURST_MODE          (1ULL << 4)
/**< Event device is capable of operating in burst mode for enqueue (forward,
 * release) and dequeue operations. If this capability is not set, the
 * application can still use rte_event_dequeue_burst() and
 * rte_event_enqueue_burst(), but the PMD accepts only one event at a time.
 *
 * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
 */
#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE    (1ULL << 5)
/**< Event device ports support disabling the implicit release feature, in
 * which the port will release all unreleased events in its dequeue operation.
 * If this capability is set and the port is configured with implicit release
 * disabled, the application is responsible for explicitly releasing events
 * using either the RTE_EVENT_OP_FORWARD or the RTE_EVENT_OP_RELEASE event
 * enqueue operations.
 *
 * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
 */

#define RTE_EVENT_DEV_CAP_NONSEQ_MODE         (1ULL << 6)
/**< Event device is capable of operating in non-sequential mode. The path
 * of the event need not be sequential, and the application can change
 * the path of an event at runtime. If the flag is set, events may be sent
 * to queues in any order. If the flag is not set, then each event
 * will follow a path from queue 0 to queue 1 to queue 2, etc., and the
 * eventdev will return an error when the application enqueues an event for a
 * qid which is not the next in the sequence.
 */

#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK   (1ULL << 7)
/**< Event device is capable of configuring the queue/port link at runtime.
 * If the flag is not set, the eventdev queue/port link can only be
 * configured during initialization.
 */

#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
/**< Event device is capable of setting up links between multiple queues
 * and a single port. If the flag is not set, the eventdev can only map a
 * single queue to each port, or map a single queue to many ports.
 */

#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
/**< Event device preserves the flow ID from the enqueued
 * event to the dequeued event if the flag is set. Otherwise,
 * the content of this field is implementation dependent.
 */

#define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)
/**< Event device *does not* require calls to rte_event_maintain().
 * An event device that does not set this flag requires calls to
 * rte_event_maintain() during periods when neither
 * rte_event_dequeue_burst() nor rte_event_enqueue_burst() are called
 * on a port. This will allow the event device to perform internal
 * processing, such as flushing buffered events, returning credits to a
 * global pool, or processing signaling related to load balancing.
 */

#define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11)
/**< Event device is capable of changing the queue attributes at runtime, i.e.
 * after the rte_event_queue_setup() or rte_event_dev_start() call sequence. If
 * this flag is not set, eventdev queue attributes can only be configured during
 * rte_event_queue_setup().
 */

#define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12)
/**< Event device is capable of supporting multiple link profiles per event port
 * i.e., the value of `rte_event_dev_info::max_profiles_per_port` is greater
 * than one.
 */

#define RTE_EVENT_DEV_CAP_ATOMIC  (1ULL << 13)
/**< Event device is capable of atomic scheduling.
 * When this flag is set, the application can configure queues with scheduling type
 * atomic on this event device.
 * @see RTE_SCHED_TYPE_ATOMIC
 */

#define RTE_EVENT_DEV_CAP_ORDERED  (1ULL << 14)
/**< Event device is capable of ordered scheduling.
 * When this flag is set, the application can configure queues with scheduling type
 * ordered on this event device.
 * @see RTE_SCHED_TYPE_ORDERED
 */

#define RTE_EVENT_DEV_CAP_PARALLEL  (1ULL << 15)
/**< Event device is capable of parallel scheduling.
 * When this flag is set, the application can configure queues with scheduling type
 * parallel on this event device.
 * @see RTE_SCHED_TYPE_PARALLEL
 */

/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST   0
/**< Highest priority expressed across eventdev subsystem
 * @see rte_event_queue_setup(), rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */
#define RTE_EVENT_DEV_PRIORITY_NORMAL    128
/**< Normal priority expressed across eventdev subsystem
 * @see rte_event_queue_setup(), rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */
#define RTE_EVENT_DEV_PRIORITY_LOWEST    255
/**< Lowest priority expressed across eventdev subsystem
 * @see rte_event_queue_setup(), rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */

/* Event queue scheduling weights */
#define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255
/**< Highest weight of an event queue
 * @see rte_event_queue_attr_get(), rte_event_queue_attr_set()
 */
#define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0
/**< Lowest weight of an event queue
 * @see rte_event_queue_attr_get(), rte_event_queue_attr_set()
 */

/* Event queue scheduling affinity */
#define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255
/**< Highest scheduling affinity of an event queue
 * @see rte_event_queue_attr_get(), rte_event_queue_attr_set()
 */
#define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0
/**< Lowest scheduling affinity of an event queue
 * @see rte_event_queue_attr_get(), rte_event_queue_attr_set()
 */

/**
 * Get the total number of event devices that have been successfully
 * initialised.
 *
 * @return
 *   The total number of usable event devices.
 */
uint8_t
rte_event_dev_count(void);

/**
 * Get the device identifier for the named event device.
 *
 * @param name
 *   Event device name to select the event device identifier.
 *
 * @return
 *   Returns event device identifier on success.
 *   - <0: Failure to find named event device.
 */
int
rte_event_dev_get_dev_id(const char *name);

/**
 * Return the NUMA socket to which a device is connected.
 *
 * @param dev_id
 *   The identifier of the device.
 * @return
 *   The NUMA socket id to which the device is connected or
 *   a default of zero if the socket could not be determined.
 *   - (-EINVAL) *dev_id* value is out of range.
 */
int
rte_event_dev_socket_id(uint8_t dev_id);

/**
 * Event device information
 */
struct rte_event_dev_info {
	const char *driver_name;	/**< Event driver name */
	struct rte_device *dev;	/**< Device information */
	uint32_t min_dequeue_timeout_ns;
	/**< Minimum supported global dequeue timeout(ns) by this device */
	uint32_t max_dequeue_timeout_ns;
	/**< Maximum supported global dequeue timeout(ns) by this device */
	uint32_t dequeue_timeout_ns;
	/**< Configured global dequeue timeout(ns) for this device */
	uint8_t max_event_queues;
	/**< Maximum event queues supported by this device */
	uint32_t max_event_queue_flows;
	/**< Maximum supported flows in an event queue by this device */
	uint8_t max_event_queue_priority_levels;
	/**< Maximum number of event queue priority levels supported by this
	 * device.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
	 */
	uint8_t max_event_priority_levels;
	/**< Maximum number of event priority levels supported by this device.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_EVENT_QOS capability.
	 */
	uint8_t max_event_ports;
	/**< Maximum number of event ports supported by this device */
	uint8_t max_event_port_dequeue_depth;
	/**< Maximum number of events that can be dequeued at a time from an
	 * event port on this device.
	 * A device that does not support bulk dequeue will set this as 1.
	 */
	uint32_t max_event_port_enqueue_depth;
	/**< Maximum number of events that can be enqueued at a time to an
	 * event port on this device.
	 * A device that does not support bulk enqueue will set this as 1.
	 */
	uint8_t max_event_port_links;
	/**< Maximum number of queues that can be linked to a single event
	 * port on this device.
	 */
	int32_t max_num_events;
	/**< A *closed system* event dev has a limit on the number of events it
	 * can manage at a time. An *open system* event dev does not have a
	 * limit and will specify this as -1.
	 */
	uint32_t event_dev_cap;
	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
	uint8_t max_single_link_event_port_queue_pairs;
	/**< Maximum number of event ports and queues that are optimized for
	 * (and only capable of) single-link configurations supported by this
	 * device. These ports and queues are not accounted for in
	 * max_event_ports or max_event_queues.
	 */
	uint8_t max_profiles_per_port;
	/**< Maximum number of event queue profiles per event port.
	 * A device that doesn't support multiple profiles will set this as 1.
	 */
};

/**
 * Retrieve the contextual information of an event device.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param[out] dev_info
 *   A pointer to a structure of type *rte_event_dev_info* to be filled with the
 *   contextual information of the device.
 *
 * @return
 *   - 0: Success, driver updates the contextual information of the event device
 *   - <0: Error code returned by the driver info get function.
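 *
 * A minimal usage sketch (dev_id is assumed to be a valid device identifier):
 *
 * \code{.c}
 *	struct rte_event_dev_info info;
 *
 *	if (rte_event_dev_info_get(dev_id, &info) == 0)
 *		printf("%s: max queues %u, max ports %u\n",
 *		       info.driver_name, info.max_event_queues,
 *		       info.max_event_ports);
 * \endcode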
 */
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);

/**
 * The count of ports.
 */
#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
/**
 * The count of queues.
 */
#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
/**
 * The status of the device, zero for stopped, non-zero for started.
 */
#define RTE_EVENT_DEV_ATTR_STARTED 2

/**
 * Get an attribute from a device.
 *
 * @param dev_id Eventdev id
 * @param attr_id The attribute ID to retrieve
 * @param[out] attr_value A pointer that will be filled in with the attribute
 *             value if successful.
 *
 * @return
 *   - 0: Successfully retrieved attribute value
 *   - -EINVAL: Invalid device or *attr_id* provided, or *attr_value* is NULL
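 *
 * For example, to read the number of configured ports (dev_id is assumed to
 * be a valid device identifier):
 *
 * \code{.c}
 *	uint32_t nb_ports;
 *
 *	if (rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_PORT_COUNT,
 *				   &nb_ports) == 0) {
 *		// nb_ports now holds the number of configured event ports.
 *	}
 * \endcode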
 */
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value);


/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
/**< Override the global *dequeue_timeout_ns* and use per dequeue timeout in ns.
 *  @see rte_event_dequeue_timeout_ticks(), rte_event_dequeue_burst()
 */

534 
535 /** Event device configuration structure */
536 struct rte_event_dev_config {
537 	uint32_t dequeue_timeout_ns;
538 	/**< rte_event_dequeue_burst() timeout on this device.
539 	 * This value should be in the range of *min_dequeue_timeout_ns* and
540 	 * *max_dequeue_timeout_ns* which previously provided in
541 	 * rte_event_dev_info_get()
542 	 * The value 0 is allowed, in which case, default dequeue timeout used.
543 	 * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
544 	 */
545 	int32_t nb_events_limit;
546 	/**< In a *closed system* this field is the limit on maximum number of
547 	 * events that can be inflight in the eventdev at a given time. The
548 	 * limit is required to ensure that the finite space in a closed system
549 	 * is not overwhelmed. The value cannot exceed the *max_num_events*
550 	 * as provided by rte_event_dev_info_get().
551 	 * This value should be set to -1 for *open system*.
552 	 */
553 	uint8_t nb_event_queues;
554 	/**< Number of event queues to configure on this device.
555 	 * This value cannot exceed the *max_event_queues* which previously
556 	 * provided in rte_event_dev_info_get()
557 	 */
558 	uint8_t nb_event_ports;
559 	/**< Number of event ports to configure on this device.
560 	 * This value cannot exceed the *max_event_ports* which previously
561 	 * provided in rte_event_dev_info_get()
562 	 */
563 	uint32_t nb_event_queue_flows;
564 	/**< Number of flows for any event queue on this device.
565 	 * This value cannot exceed the *max_event_queue_flows* which previously
566 	 * provided in rte_event_dev_info_get()
567 	 */
568 	uint32_t nb_event_port_dequeue_depth;
569 	/**< Maximum number of events can be dequeued at a time from an
570 	 * event port by this device.
571 	 * This value cannot exceed the *max_event_port_dequeue_depth*
572 	 * which previously provided in rte_event_dev_info_get().
573 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
574 	 * @see rte_event_port_setup()
575 	 */
576 	uint32_t nb_event_port_enqueue_depth;
577 	/**< Maximum number of events can be enqueued at a time from an
578 	 * event port by this device.
579 	 * This value cannot exceed the *max_event_port_enqueue_depth*
580 	 * which previously provided in rte_event_dev_info_get().
581 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
582 	 * @see rte_event_port_setup()
583 	 */
584 	uint32_t event_dev_cfg;
585 	/**< Event device config flags(RTE_EVENT_DEV_CFG_)*/
586 	uint8_t nb_single_link_event_port_queues;
587 	/**< Number of event ports and queues that will be singly-linked to
588 	 * each other. These are a subset of the overall event ports and
589 	 * queues; this value cannot exceed *nb_event_ports* or
590 	 * *nb_event_queues*. If the device has ports and queues that are
591 	 * optimized for single-link usage, this field is a hint for how many
592 	 * to allocate; otherwise, regular event ports and queues can be used.
593 	 */
594 };
595 
/**
 * Configure an event device.
 *
 * This function must be invoked before any other function in the
 * API. It can also be re-invoked when a device is in the
 * stopped state.
 *
 * The caller may use rte_event_dev_info_get() to get the capabilities and
 * resources available for this event device.
 *
 * @param dev_id
 *   The identifier of the device to configure.
 * @param dev_conf
 *   The event device configuration structure.
 *
 * @return
 *   - 0: Success, device configured.
 *   - <0: Error code returned by the driver configuration function.
 */
int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf);

/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES          (1ULL << 0)
/**< Allow enqueue of events with ATOMIC, ORDERED, and PARALLEL schedule types.
 *
 * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
 * @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK        (1ULL << 1)
/**< This event queue links only to a single event port.
 *
 *  @see rte_event_port_setup(), rte_event_port_link()
 */

/** Event queue configuration structure */
struct rte_event_queue_conf {
	uint32_t nb_atomic_flows;
	/**< The maximum number of active flows this queue can track at any
	 * given time. If the queue is configured for atomic scheduling (by
	 * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg
	 * or the RTE_SCHED_TYPE_ATOMIC flag to schedule_type), then the
	 * value must be in the range of [1, nb_event_queue_flows], which was
	 * previously provided in rte_event_dev_configure().
	 */
	uint32_t nb_atomic_order_sequences;
	/**< The maximum number of outstanding events waiting to be
	 * reordered by this queue. In other words, the number of entries in
	 * this queue's reorder buffer. When the number of events in the
	 * reorder buffer reaches *nb_atomic_order_sequences*, the
	 * scheduler cannot schedule the events from this queue, and an invalid
	 * event will be returned from dequeue until one or more entries are
	 * freed up/released.
	 * If the queue is configured for ordered scheduling (by applying the
	 * RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg or the
	 * RTE_SCHED_TYPE_ORDERED flag to schedule_type), then the value must
	 * be in the range of [1, nb_event_queue_flows], which was
	 * previously supplied to rte_event_dev_configure().
	 */
	uint32_t event_queue_cfg;
	/**< Queue cfg flags(EVENT_QUEUE_CFG_) */
	uint8_t schedule_type;
	/**< Queue schedule type(RTE_SCHED_TYPE_*).
	 * Valid when the RTE_EVENT_QUEUE_CFG_ALL_TYPES bit is not set in
	 * event_queue_cfg.
	 */
	uint8_t priority;
	/**< Priority for this event queue relative to other event queues.
	 * The requested priority should be in the range of
	 * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
	 * The implementation shall normalize the requested priority to an
	 * event device supported priority value.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
	 */
	uint8_t weight;
	/**< Weight of the event queue relative to other event queues.
	 * The requested weight should be in the range of
	 * [RTE_EVENT_QUEUE_WEIGHT_LOWEST, RTE_EVENT_QUEUE_WEIGHT_HIGHEST].
	 * The implementation shall normalize the requested weight to an event
	 * device supported weight value.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
	 */
	uint8_t affinity;
	/**< Affinity of the event queue relative to other event queues.
	 * The requested affinity should be in the range of
	 * [RTE_EVENT_QUEUE_AFFINITY_LOWEST, RTE_EVENT_QUEUE_AFFINITY_HIGHEST].
	 * The implementation shall normalize the requested affinity to an event
	 * device supported affinity value.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
	 */
};

/**
 * Retrieve the default configuration information of an event queue designated
 * by its *queue_id* from the event driver for an event device.
 *
 * This function is intended to be used in conjunction with
 * rte_event_queue_setup() where the caller needs to set up the queue by
 * overriding a few default values.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the event queue to get the configuration information.
 *   The value must be in the range [0, nb_event_queues - 1]
 *   previously supplied to rte_event_dev_configure().
 * @param[out] queue_conf
 *   The pointer to the default event queue configuration data.
 * @return
 *   - 0: Success, driver updates the default event queue configuration data.
 *   - <0: Error code returned by the driver info get function.
 *
 * @see rte_event_queue_setup()
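 *
 * A sketch of the intended override pattern (dev_id and queue_id are assumed
 * to be valid; error handling omitted):
 *
 * \code{.c}
 *	struct rte_event_queue_conf conf;
 *
 *	rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
 *	conf.schedule_type = RTE_SCHED_TYPE_ATOMIC; // override one field
 *	rte_event_queue_setup(dev_id, queue_id, &conf);
 * \endcode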
 */
int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf);

/**
 * Allocate and set up an event queue for an event device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the event queue to setup. The value must be in the range
 *   [0, nb_event_queues - 1] previously supplied to rte_event_dev_configure().
 * @param queue_conf
 *   The pointer to the configuration data to be used for the event queue.
 *   A NULL value is allowed, in which case the default configuration is used.
 *
 * @see rte_event_queue_default_conf_get()
 *
 * @return
 *   - 0: Success, event queue correctly set up.
 *   - <0: event queue configuration failed
 */
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf);

/**
 * The priority of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
/**
 * The number of atomic flows configured for the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
/**
 * The number of atomic order sequences configured for the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
/**
 * The cfg flags for the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
/**
 * The schedule type of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
/**
 * The weight of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_WEIGHT 5
/**
 * Affinity of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_AFFINITY 6

/**
 * Get an attribute from a queue.
 *
 * @param dev_id
 *   Eventdev id
 * @param queue_id
 *   Eventdev queue id
 * @param attr_id
 *   The attribute ID to retrieve
 * @param[out] attr_value
 *   A pointer that will be filled in with the attribute value if successful
 *
 * @return
 *   - 0: Successfully returned value
 *   - -EINVAL: invalid device, queue or attr_id provided, or attr_value was
 *		NULL
 *   - -EOVERFLOW: returned when attr_id is set to
 *   RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and event_queue_cfg is set to
 *   RTE_EVENT_QUEUE_CFG_ALL_TYPES
 */
int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value);

/**
 * Set an event queue attribute.
 *
 * @param dev_id
 *   Eventdev id
 * @param queue_id
 *   Eventdev queue id
 * @param attr_id
 *   The attribute ID to set
 * @param attr_value
 *   The attribute value to set
 *
 * @return
 *   - 0: Successfully set attribute.
 *   - -EINVAL: invalid device, queue or attr_id.
 *   - -ENOTSUP: device does not support setting the event attribute.
 *   - <0: failed to set event queue attribute
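 *
 * For example, raising a queue's priority at runtime (only meaningful on
 * devices with the RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR capability; dev_id
 * and queue_id are assumed to be valid):
 *
 * \code{.c}
 *	rte_event_queue_attr_set(dev_id, queue_id,
 *				 RTE_EVENT_QUEUE_ATTR_PRIORITY,
 *				 RTE_EVENT_DEV_PRIORITY_HIGHEST);
 * \endcode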
 */
int
rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			 uint64_t attr_value);

/* Event port specific APIs */

/* Event port configuration bitmap flags */
#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)
/**< Configure the port not to release outstanding events in
 * rte_event_dequeue_burst(). If set, all events received through
 * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
 * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
 * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
 */
#define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)
/**< This event port links only to a single event queue.
 *
 *  @see rte_event_port_setup(), rte_event_port_link()
 */
#define RTE_EVENT_PORT_CFG_HINT_PRODUCER       (1ULL << 2)
/**< Hint that this event port will primarily enqueue events to the system.
 * A PMD can optimize its internal workings by assuming that this port is
 * primarily going to enqueue NEW events.
 *
 * Note that this flag is only a hint, so PMDs must operate under the
 * assumption that any port can enqueue an event with any type of op.
 *
 *  @see rte_event_port_setup()
 */
#define RTE_EVENT_PORT_CFG_HINT_CONSUMER       (1ULL << 3)
/**< Hint that this event port will primarily dequeue events from the system.
 * A PMD can optimize its internal workings by assuming that this port is
 * primarily going to consume events, and not enqueue FORWARD or RELEASE
 * events.
 *
 * Note that this flag is only a hint, so PMDs must operate under the
 * assumption that any port can enqueue an event with any type of op.
 *
 *  @see rte_event_port_setup()
 */
#define RTE_EVENT_PORT_CFG_HINT_WORKER         (1ULL << 4)
/**< Hint that this event port will primarily pass existing events through.
 * A PMD can optimize its internal workings by assuming that this port is
 * primarily going to FORWARD events, and not enqueue NEW or RELEASE events
 * often.
 *
 * Note that this flag is only a hint, so PMDs must operate under the
 * assumption that any port can enqueue an event with any type of op.
 *
 *  @see rte_event_port_setup()
 */

/** Event port configuration structure */
struct rte_event_port_conf {
	int32_t new_event_threshold;
	/**< A backpressure threshold for new event enqueues on this port.
	 * Use for a *closed system* event dev where event capacity is limited,
	 * and cannot exceed the capacity of the event dev.
	 * Configuring ports with different thresholds can make higher priority
	 * traffic less likely to be backpressured.
	 * For example, a port used to inject NIC Rx packets into the event dev
	 * can have a lower threshold so as not to overwhelm the device,
	 * while ports used for worker pools can have a higher threshold.
	 * This value cannot exceed the *nb_events_limit*
	 * which was previously supplied to rte_event_dev_configure().
	 * This should be set to '-1' for *open systems*.
	 */
	uint16_t dequeue_depth;
	/**< Configure the number of bulk dequeues for this event port.
	 * This value cannot exceed the *nb_event_port_dequeue_depth*
	 * which was previously supplied to rte_event_dev_configure().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 */
	uint16_t enqueue_depth;
	/**< Configure the number of bulk enqueues for this event port.
	 * This value cannot exceed the *nb_event_port_enqueue_depth*
	 * which was previously supplied to rte_event_dev_configure().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 */
	uint32_t event_port_cfg; /**< Port cfg flags(EVENT_PORT_CFG_) */
};

/**
 * Retrieve the default configuration information of an event port designated
 * by its *port_id* from the event driver for an event device.
 *
 * This function is intended to be used in conjunction with
 * rte_event_port_setup() where the caller needs to set up the port by
 * overriding a few default values.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The index of the event port to get the configuration information.
 *   The value must be in the range [0, nb_event_ports - 1]
 *   previously supplied to rte_event_dev_configure().
 * @param[out] port_conf
 *   The pointer to the default event port configuration data
 * @return
 *   - 0: Success, driver updates the default event port configuration data.
 *   - <0: Error code returned by the driver info get function.
 *
 * @see rte_event_port_setup()
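 *
 * A sketch of the intended override pattern (dev_id and port_id are assumed
 * to be valid; error handling omitted):
 *
 * \code{.c}
 *	struct rte_event_port_conf conf;
 *
 *	rte_event_port_default_conf_get(dev_id, port_id, &conf);
 *	conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER; // override
 *	rte_event_port_setup(dev_id, port_id, &conf);
 * \endcode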
 */
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				struct rte_event_port_conf *port_conf);

/**
 * Allocate and set up an event port for an event device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The index of the event port to setup. The value must be in the range
 *   [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
 * @param port_conf
 *   The pointer to the configuration data to be used for the queue.
 *   A NULL value is allowed, in which case the default configuration is used.
 *
 * @see rte_event_port_default_conf_get()
 *
 * @return
 *   - 0: Success, event port correctly set up.
 *   - <0: Port configuration failed
 *   - (-EDQUOT) Quota exceeded (application tried to link a queue configured
 *   with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
 */
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf);

typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
					  struct rte_event event, void *arg);
/**< Callback function prototype that can be passed during
 * rte_event_port_quiesce(), invoked once per released event.
 */

/**
 * Quiesce any core specific resources consumed by the event port.
 *
 * Event ports are generally coupled with lcores, and a given hardware
 * implementation might require the PMD to store port specific data in the
 * lcore.
 * When the application decides to migrate the event port to another lcore
 * or tear down the current lcore, it may call `rte_event_port_quiesce`
 * to make sure that all the data associated with the event port are released
 * from the lcore; this might also include any prefetched events.
 * While releasing the event port from the lcore, this function calls the
 * user-provided flush callback once per event.
 *
 * @note Invocation of this API does not affect the existing port configuration.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The index of the event port to quiesce. The value must be in the range
 *   [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
 * @param release_cb
 *   Callback function invoked once per flushed event.
 * @param args
 *   Argument supplied to callback.
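 *
 * A sketch of a flush callback and its use when migrating a port off an
 * lcore (the callback name is illustrative; freeing the mbuf assumes the
 * prefetched events carry mbufs):
 *
 * \code{.c}
 *	static void
 *	flush_cb(uint8_t dev_id, struct rte_event event, void *arg)
 *	{
 *		rte_pktmbuf_free(event.mbuf); // dispose of the flushed event
 *	}
 *
 *	rte_event_port_quiesce(dev_id, port_id, flush_cb, NULL);
 * \endcode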
 */
void
rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
		       rte_eventdev_port_flush_t release_cb, void *args);

/**
 * The queue depth of the port on the enqueue side
 */
#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
/**
 * The queue depth of the port on the dequeue side
 */
#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
/**
 * The new event threshold of the port
 */
#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
/**
 * The implicit release disable attribute of the port
 */
#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3

/**
 * Get an attribute from a port.
 *
 * @param dev_id
 *   Eventdev id
 * @param port_id
 *   Eventdev port id
 * @param attr_id
 *   The attribute ID to retrieve
 * @param[out] attr_value
 *   A pointer that will be filled in with the attribute value if successful
 *
 * @return
 *   - 0: Successfully returned value
 *   - (-EINVAL) Invalid device, port or attr_id, or attr_value was NULL
 */
int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value);

/**
 * Start an event device.
 *
 * The device start step is the last one and consists of setting the event
 * queues to start accepting events and scheduling them to event ports.
 *
 * On success, all basic functions exported by the API (event enqueue,
 * event dequeue and so on) can be invoked.
 *
 * @param dev_id
 *   Event device identifier
 * @return
 *   - 0: Success, device started.
 *   - -ESTALE: Not all ports of the device are configured
 *   - -ENOLINK: Not all queues are linked, which could lead to deadlock.
 */
int
rte_event_dev_start(uint8_t dev_id);

/**
 * Stop an event device.
 *
 * This function causes all queued events to be drained, including those
 * residing in event ports. While draining events out of the device, this
 * function calls the user-provided flush callback (if one was registered) once
 * per event.
 *
 * The device can be restarted with a call to rte_event_dev_start(). Threads
 * that continue to enqueue/dequeue while the device is stopped, or being
 * stopped, will result in undefined behavior. This includes event adapters,
 * which must be stopped prior to stopping the eventdev.
 *
 * @param dev_id
 *   Event device identifier.
 *
 * @see rte_event_dev_stop_flush_callback_register()
 */
void
rte_event_dev_stop(uint8_t dev_id);

typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
					  struct rte_event event, void *arg);
/**< Callback function called during rte_event_dev_stop(), invoked once per
 * flushed event.
 */

/**
 * Registers a callback function to be invoked during rte_event_dev_stop() for
 * each flushed event. This function can be used to properly dispose of queued
 * events, for example events containing memory pointers.
 *
 * The callback function is only registered for the calling process. The
 * callback function must be registered in every process that can call
 * rte_event_dev_stop().
 *
 * To unregister a callback, call this function with a NULL callback pointer.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param callback
 *   Callback function invoked once per flushed event.
 * @param userdata
 *   Argument supplied to callback.
 *
 * @return
 *  - 0 on success.
 *  - -EINVAL if *dev_id* is invalid
 *
 * @see rte_event_dev_stop()
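 *
 * A sketch disposing of mbuf-carrying events at stop time (the callback name
 * is illustrative; it assumes ethdev events carry mbufs):
 *
 * \code{.c}
 *	static void
 *	stop_flush_cb(uint8_t dev_id, struct rte_event event, void *arg)
 *	{
 *		if (event.event_type == RTE_EVENT_TYPE_ETHDEV)
 *			rte_pktmbuf_free(event.mbuf);
 *	}
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, stop_flush_cb, NULL);
 * \endcode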
 */
int rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
					       rte_eventdev_stop_flush_t callback, void *userdata);

/**
 * Close an event device. The device cannot be restarted!
 *
 * @param dev_id
 *   Event device identifier
 *
 * @return
 *  - 0 on successfully closing device
 *  - <0 on failure to close device
 *  - (-EAGAIN) if device is busy
 */
int
rte_event_dev_close(uint8_t dev_id);

/**
 * Event vector structure.
 */
struct rte_event_vector {
	uint16_t nb_elem;
	/**< Number of elements valid in this event vector. */
	uint16_t elem_offset : 12;
	/**< Offset into the vector array where valid elements start from. */
	uint16_t rsvd : 3;
	/**< Reserved for future use */
	uint16_t attr_valid : 1;
	/**< Indicates that the below union attributes have valid information.
	 */
	union {
		/* Used by Rx/Tx adapter.
		 * Indicates that all the elements in this vector belong to the
		 * same port and queue pair when originating from the Rx adapter,
		 * valid only when the event type is ETHDEV_VECTOR or
		 * ETH_RX_ADAPTER_VECTOR.
		 * Can also be used to indicate to the Tx adapter the destination
		 * port and queue of the mbufs in the vector.
		 */
		struct {
			uint16_t port;
			/* Ethernet device port id. */
			uint16_t queue;
			/* Ethernet device queue id. */
		};
	};
	/**< Union to hold common attributes of the vector array. */
	uint64_t impl_opaque;

/* empty structures do not have zero size in C++ leading to compilation errors
 * with clang about structure having different sizes in C and C++.
 * Since these are all zero-sized arrays, we can omit the "union" wrapper for
 * C++ builds, removing the warning.
 */
#ifndef __cplusplus
	/**< Implementation specific opaque value.
	 * An implementation may use this field to hold an implementation
	 * specific value to share between the dequeue and enqueue operations.
	 * The application should not modify this field.
	 */
	union {
#endif
		struct rte_mbuf *mbufs[0];
		void *ptrs[0];
		uint64_t u64s[0];
#ifndef __cplusplus
	} __rte_aligned(16);
#endif
	/**< Start of the vector array union. Depending upon the event type the
	 * vector array can be an array of mbufs or pointers or opaque u64
	 * values.
	 */
} __rte_aligned(16);

/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED          0
/**< Ordered scheduling
 *
 * Events from an ordered flow of an event queue can be scheduled to multiple
 * ports for concurrent processing while maintaining the original event order.
 * This scheme enables the user to achieve high single flow throughput by
 * avoiding SW synchronization for ordering between ports which are bound to
 * cores.
 *
 * The source flow ordering from an event queue is maintained when events are
 * enqueued to their destination queue within the same ordered flow context.
 * An event port holds the context until the application calls
 * rte_event_dequeue_burst() from the same port, which implicitly releases
 * the context.
 * The user may allow the scheduler to release the context earlier than that
 * by invoking rte_event_enqueue_burst() with the RTE_EVENT_OP_RELEASE operation.
 *
 * Events from the source queue appear in their original order when dequeued
 * from a destination queue.
 * Event ordering is based on the received event(s), but also other
 * (newly allocated or stored) events are ordered when enqueued within the same
 * ordered context. Events not enqueued (e.g. released or stored) within the
 * context are considered missing from reordering and are skipped at this time
 * (but can be ordered again within another context).
 *
 * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
 */

#define RTE_SCHED_TYPE_ATOMIC           1
/**< Atomic scheduling
 *
 * Events from an atomic flow of an event queue can be scheduled only to a
 * single port at a time. The port is guaranteed to have exclusive (atomic)
 * access to the associated flow context, which enables the user to avoid SW
 * synchronization. Atomic flows also help to maintain event ordering
 * since only one port at a time can process events from a flow of an
 * event queue.
 *
 * The atomic queue synchronization context is dedicated to the port until the
 * application calls rte_event_dequeue_burst() from the same port,
 * which implicitly releases the context. The user may allow the scheduler to
 * release the context earlier than that by invoking rte_event_enqueue_burst()
 * with the RTE_EVENT_OP_RELEASE operation.
 *
 * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
 */

#define RTE_SCHED_TYPE_PARALLEL         2
/**< Parallel scheduling
 *
 * The scheduler performs priority scheduling, load balancing, etc. functions
 * but does not provide additional event synchronization or ordering.
 * It is free to schedule events from a single parallel flow of an event queue
 * to multiple event ports for concurrent processing.
 * The application is responsible for flow context synchronization and
 * event ordering (SW synchronization).
 *
 * @see rte_event_queue_setup(), rte_event_dequeue_burst()
 */

/* Event types to classify the event source */
#define RTE_EVENT_TYPE_ETHDEV           0x0
/**< The event generated from ethdev subsystem */
#define RTE_EVENT_TYPE_CRYPTODEV        0x1
/**< The event generated from cryptodev subsystem */
#define RTE_EVENT_TYPE_TIMER		0x2
/**< The event generated from event timer adapter */
#define RTE_EVENT_TYPE_CPU              0x3
/**< The event generated from cpu for pipelining.
 * Application may use *sub_event_type* to further classify the event
 */
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER   0x4
/**< The event generated from event eth Rx adapter */
#define RTE_EVENT_TYPE_DMADEV           0x5
/**< The event generated from dma subsystem */
#define RTE_EVENT_TYPE_VECTOR           0x8
/**< Indicates that event is a vector.
 * All vector event types should be a logical OR of EVENT_TYPE_VECTOR.
 * This simplifies the pipeline design as one can split processing the events
 * between vector events and normal events across event types.
 * Example:
 *	if (ev.event_type & RTE_EVENT_TYPE_VECTOR) {
 *		// Classify and handle vector event.
 *	} else {
 *		// Classify and handle event.
 *	}
 */
#define RTE_EVENT_TYPE_ETHDEV_VECTOR                                           \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
/**< The event vector generated from ethdev subsystem */
#define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
/**< The event vector generated from cpu for pipelining. */
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR                                   \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
/**< The event vector generated from eth Rx adapter. */
#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR                                        \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
/**< The event vector generated from cryptodev adapter. */

#define RTE_EVENT_TYPE_MAX              0x10
/**< Maximum number of event types */

1258 /* Event enqueue operations */
1259 #define RTE_EVENT_OP_NEW                0
1260 /**< The event producers use this operation to inject a new event to the
1261  * event device.
1262  */
1263 #define RTE_EVENT_OP_FORWARD            1
1264 /**< The CPU use this operation to forward the event to different event queue or
1265  * change to new application specific flow or schedule type to enable
1266  * pipelining.
1267  *
1268  * This operation must only be enqueued to the same port that the
1269  * event to be forwarded was dequeued from.
1270  */
1271 #define RTE_EVENT_OP_RELEASE            2
1272 /**< Release the flow context associated with the schedule type.
1273  *
1274  * If current flow's scheduler type method is *RTE_SCHED_TYPE_ATOMIC*
1275  * then this function hints the scheduler that the user has completed critical
1276  * section processing in the current atomic context.
1277  * The scheduler is now allowed to schedule events from the same flow from
1278  * an event queue to another port. However, the context may be still held
1279  * until the next rte_event_dequeue_burst() call, this call allows but does not
1280  * force the scheduler to release the context early.
1281  *
1282  * Early atomic context release may increase parallelism and thus system
1283  * performance, but the user needs to design carefully the split into critical
1284  * vs non-critical sections.
1285  *
1286  * If current flow's scheduler type method is *RTE_SCHED_TYPE_ORDERED*
1287  * then this function hints the scheduler that the user has done all that need
1288  * to maintain event order in the current ordered context.
1289  * The scheduler is allowed to release the ordered context of this port and
1290  * avoid reordering any following enqueues.
1291  *
1292  * Early ordered context release may increase parallelism and thus system
1293  * performance.
1294  *
1295  * If the current flow's schedule type is *RTE_SCHED_TYPE_PARALLEL*
1296  * or no scheduling context is held then this operation may be a no-op,
1297  * depending on the implementation.
1298  *
1299  * This operation must only be enqueued to the same port that the
1300  * event to be released was dequeued from.
1301  */
1302 
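/*
 * A minimal usage sketch of the enqueue operation types in a worker loop;
 * an editorial example, not part of the formal API definition. The dev_id,
 * port_id, next_queue_id and timeout variables, and the process() helper,
 * are assumed to be provided by the application.
 *
 *	struct rte_event ev;
 *
 *	while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, timeout)) {
 *		if (process(&ev)) {
 *			// More work to do: forward to the next pipeline stage.
 *			ev.op = RTE_EVENT_OP_FORWARD;
 *			ev.queue_id = next_queue_id;
 *		} else {
 *			// Done: release the held scheduling context.
 *			ev.op = RTE_EVENT_OP_RELEASE;
 *		}
 *		// Both operations go to the port the event was dequeued from.
 *		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 *	}
 */
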
1303 /**
1304  * The generic *rte_event* structure to hold the event attributes
1305  * for dequeue and enqueue operation
1306  */
1307 struct rte_event {
1308 	/** WORD0 */
1309 	union {
1310 		uint64_t event;
1311 		/** Event attributes for dequeue or enqueue operation */
1312 		struct {
1313 			uint32_t flow_id:20;
1314 			/**< Targeted flow identifier for the enqueue and
1315 			 * dequeue operation.
1316 			 * The value must be in the range of
1317 			 * [0, nb_event_queue_flows - 1] which was
1318 			 * previously supplied to rte_event_dev_configure().
1319 			 */
1320 			uint32_t sub_event_type:8;
1321 			/**< Sub-event types based on the event source.
1322 			 * @see RTE_EVENT_TYPE_CPU
1323 			 */
1324 			uint32_t event_type:4;
1325 			/**< Event type to classify the event source.
1326 			 * @see RTE_EVENT_TYPE_ETHDEV, (RTE_EVENT_TYPE_*)
1327 			 */
1328 			uint8_t op:2;
1329 			/**< The type of event enqueue operation - new/forward/
1330 			 * etc. This field is not preserved across an instance
1331 			 * and is undefined on dequeue.
1332 			 * @see RTE_EVENT_OP_NEW, (RTE_EVENT_OP_*)
1333 			 */
1334 			uint8_t rsvd:4;
1335 			/**< Reserved for future use */
1336 			uint8_t sched_type:2;
1337 			/**< Scheduler synchronization type (RTE_SCHED_TYPE_*)
1338 			 * associated with flow id on a given event queue
1339 			 * for the enqueue and dequeue operation.
1340 			 */
1341 			uint8_t queue_id;
1342 			/**< Targeted event queue identifier for the enqueue or
1343 			 * dequeue operation.
1344 			 * The value must be in the range of
1345 			 * [0, nb_event_queues - 1] which was previously
1346 			 * supplied to rte_event_dev_configure().
1347 			 */
1348 			uint8_t priority;
1349 			/**< Event priority relative to other events in the
1350 			 * event queue. The requested priority should be in the
1351 			 * range of [RTE_EVENT_DEV_PRIORITY_HIGHEST,
1352 			 * RTE_EVENT_DEV_PRIORITY_LOWEST].
1353 			 * The implementation shall normalize the requested
1354 			 * priority to supported priority value.
1355 			 * Valid when the device has
1356 			 * RTE_EVENT_DEV_CAP_EVENT_QOS capability.
1357 			 */
1358 			uint8_t impl_opaque;
1359 			/**< Implementation specific opaque value.
1360 			 * An implementation may use this field to hold
1361 			 * implementation specific value to share between
1362 			 * dequeue and enqueue operation.
1363 			 * The application should not modify this field.
1364 			 */
1365 		};
1366 	};
1367 	/** WORD1 */
1368 	union {
1369 		uint64_t u64;
1370 		/**< Opaque 64-bit value */
1371 		void *event_ptr;
1372 		/**< Opaque event pointer */
1373 		struct rte_mbuf *mbuf;
1374 		/**< mbuf pointer if dequeued event is associated with mbuf */
1375 		struct rte_event_vector *vec;
1376 		/**< Event vector pointer. */
1377 	};
1378 };
1379 
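/*
 * A minimal sketch of populating a new CPU-generated event for enqueue;
 * an editorial example where queue_id, flow_id and obj are assumed to be
 * application-provided.
 *
 *	struct rte_event ev = {
 *		.op = RTE_EVENT_OP_NEW,
 *		.queue_id = queue_id,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.event_type = RTE_EVENT_TYPE_CPU,
 *		.flow_id = flow_id,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		.event_ptr = obj, // WORD1: opaque payload pointer.
 *	};
 */
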
1380 /* Ethdev Rx adapter capability bitmap flags */
1381 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT	0x1
1382 /**< This flag is set when the packet transfer mechanism is in HW.
1383  * Ethdev can send packets to the event device using internal event port.
1384  */
1385 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ	0x2
1386 /**< Adapter supports multiple event queues per ethdev. Every ethdev
1387  * Rx queue can be connected to a unique event queue.
1388  */
1389 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
1390 /**< The application can override the adapter generated flow ID in the
1391  * event. This flow ID can be specified when adding an ethdev Rx queue
1392  * to the adapter using the ev.flow_id member.
1393  * @see struct rte_event_eth_rx_adapter_queue_conf::ev
1394  * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
1395  */
1396 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR	0x8
1397 /**< Adapter supports event vectorization per ethdev. */
1398 
1399 /**
1400  * Retrieve the event device's ethdev Rx adapter capabilities for the
1401  * specified ethernet port
1402  *
1403  * @param dev_id
1404  *   The identifier of the device.
1405  *
1406  * @param eth_port_id
1407  *   The identifier of the ethernet device.
1408  *
1409  * @param[out] caps
1410  *   A pointer to memory filled with Rx event adapter capabilities.
1411  *
1412  * @return
1413  *   - 0: Success, driver provides Rx event adapter capabilities for the
1414  *	ethernet device.
1415  *   - <0: Error code returned by the driver function.
1416  */
1417 int
1418 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1419 				uint32_t *caps);
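
/*
 * A minimal sketch of testing an Rx adapter capability flag; an editorial
 * example where dev_id, eth_port_id and use_internal_port are assumed
 * application variables and error handling is reduced to a bare check.
 *
 *	uint32_t caps = 0;
 *
 *	if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps) == 0 &&
 *	    (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) != 0)
 *		use_internal_port = 1; // No service core needed for Rx transfer.
 */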
1420 
1421 #define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
1422 /**< This flag is set when the timer mechanism is in HW. */
1423 
1424 #define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC      (1ULL << 1)
1425 /**< This flag is set if periodic mode is supported. */
1426 
1427 /**
1428  * Retrieve the event device's timer adapter capabilities.
1429  *
1430  * @param dev_id
1431  *   The identifier of the device.
1432  *
1433  * @param[out] caps
1434  *   A pointer to memory to be filled with event timer adapter capabilities.
1435  *
1436  * @return
1437  *   - 0: Success, driver provided event timer adapter capabilities.
1438  *   - <0: Error code returned by the driver function.
1439  */
1440 int
1441 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
1442 
1443 /* Crypto adapter capability bitmap flag */
1444 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW   0x1
1445 /**< Flag indicates HW is capable of generating events in
1446  * RTE_EVENT_OP_NEW enqueue operation. Cryptodev will send
1447  * packets to the event device as new events using an internal
1448  * event port.
1449  */
1450 
1451 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD   0x2
1452 /**< Flag indicates HW is capable of generating events in
1453  * RTE_EVENT_OP_FORWARD enqueue operation. Cryptodev will send
1454  * packets to the event device as forwarded event using an
1455  * internal event port.
1456  */
1457 
1458 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND  0x4
1459 /**< Flag indicates HW is capable of mapping crypto queue pair to
1460  * event queue.
1461  */
1462 
1463 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA   0x8
1464 /**< Flag indicates HW/SW supports a mechanism to store and retrieve
1465  * the private data information along with the crypto session.
1466  */
1467 
1468 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR   0x10
1469 /**< Flag indicates HW is capable of aggregating processed
1470  * crypto operations into rte_event_vector.
1471  */
1472 
1473 /**
1474  * Retrieve the event device's crypto adapter capabilities for the
1475  * specified cryptodev device
1476  *
1477  * @param dev_id
1478  *   The identifier of the device.
1479  *
1480  * @param cdev_id
1481  *   The identifier of the cryptodev device.
1482  *
1483  * @param[out] caps
1484  *   A pointer to memory filled with event adapter capabilities.
1485  *   It is expected to be pre-allocated & initialized by caller.
1486  *
1487  * @return
1488  *   - 0: Success, driver provides event adapter capabilities for the
1489  *     cryptodev device.
1490  *   - <0: Error code returned by the driver function.
1491  */
1492 int
1493 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
1494 				  uint32_t *caps);
1495 
1496 /* DMA adapter capability bitmap flag */
1497 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
1498 /**< Flag indicates HW is capable of generating events in
1499  * RTE_EVENT_OP_NEW enqueue operation. DMADEV will send
1500  * packets to the event device as new events using an
1501  * internal event port.
1502  */
1503 
1504 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
1505 /**< Flag indicates HW is capable of generating events in
1506  * RTE_EVENT_OP_FORWARD enqueue operation. DMADEV will send
1507  * packets to the event device as forwarded event using an
1508  * internal event port.
1509  */
1510 
1511 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4
1512 /**< Flag indicates HW is capable of mapping DMA vchan to event queue. */
1513 
1514 /**
1515  * Retrieve the event device's DMA adapter capabilities for the
1516  * specified dmadev device
1517  *
1518  * @param dev_id
1519  *   The identifier of the device.
1520  *
1521  * @param dmadev_id
1522  *   The identifier of the dmadev device.
1523  *
1524  * @param[out] caps
1525  *   A pointer to memory filled with event adapter capabilities.
1526  *   It is expected to be pre-allocated & initialized by caller.
1527  *
1528  * @return
1529  *   - 0: Success, driver provides event adapter capabilities for the
1530  *     dmadev device.
1531  *   - <0: Error code returned by the driver function.
1532  *
1533  */
1534 __rte_experimental
1535 int
1536 rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);
1537 
1538 /* Ethdev Tx adapter capability bitmap flags */
1539 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT	0x1
1540 /**< This flag is set when the PMD supports a packet transmit callback.
1541  */
1542 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR	0x2
1543 /**< Indicates that the Tx adapter is capable of handling event vectors of
1544  * mbufs.
1545  */
1546 
1547 /**
1548  * Retrieve the event device's eth Tx adapter capabilities
1549  *
1550  * @param dev_id
1551  *   The identifier of the device.
1552  *
1553  * @param eth_port_id
1554  *   The identifier of the ethernet device.
1555  *
1556  * @param[out] caps
1557  *   A pointer to memory filled with eth Tx adapter capabilities.
1558  *
1559  * @return
1560  *   - 0: Success, driver provides eth Tx adapter capabilities.
1561  *   - <0: Error code returned by the driver function.
1562  */
1563 int
1564 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1565 				uint32_t *caps);
1566 
1567 /**
1568  * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
1569  *
1570  * If the device is configured with the RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
1571  * flag then the application can use this function to convert a timeout value
1572  * in nanoseconds to the implementation-specific timeout value supplied in
1573  * rte_event_dequeue_burst().
1574  *
1575  * @param dev_id
1576  *   The identifier of the device.
1577  * @param ns
1578  *   Wait time in nanosecond
1579  * @param[out] timeout_ticks
1580  *   Value for the *timeout_ticks* parameter in rte_event_dequeue_burst()
1581  *
1582  * @return
1583  *  - 0 on success.
1584  *  - -ENOTSUP if the device doesn't support timeouts
1585  *  - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
1586  *  - other values < 0 on failure.
1587  *
1588  * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
1589  * @see rte_event_dev_configure()
1590  */
1591 int
1592 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1593 					uint64_t *timeout_ticks);
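
/*
 * A minimal sketch of converting a 100 us timeout to device ticks for use
 * in dequeue; an editorial example assuming the device was configured with
 * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT and that dev_id, port_id, evs[]
 * and nb are application variables.
 *
 *	uint64_t ticks = 0;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) == 0)
 *		nb = rte_event_dequeue_burst(dev_id, port_id, evs,
 *					     RTE_DIM(evs), ticks);
 */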
1594 
1595 /**
1596  * Link multiple source event queues supplied in *queues* to the destination
1597  * event port designated by its *port_id* with associated service priority
1598  * supplied in *priorities* on the event device designated by its *dev_id*.
1599  *
1600  * The link establishment shall enable the event port *port_id* to receive
1601  * events from the specified event queue(s) supplied in *queues*.
1602  *
1603  * An event queue may link to one or more event ports.
1604  * The number of links that can be established from an event queue to an event
1605  * port is implementation-defined.
1606  *
1607  * Event queue(s) to event port link establishment can be changed at runtime
1608  * without re-configuring the device to support scaling and to reduce the
1609  * latency of critical work by establishing the link with more event ports
1610  * at runtime.
1611  *
1612  * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
1613  * than or equal to one, this function links the event queues to the default
1614  * profile_id i.e. profile_id 0 of the event port.
1615  *
1616  * @param dev_id
1617  *   The identifier of the device.
1618  *
1619  * @param port_id
1620  *   Event port identifier to select the destination port to link.
1621  *
1622  * @param queues
1623  *   Points to an array of *nb_links* event queues to be linked
1624  *   to the event port.
1625  *   NULL value is allowed, in which case this function links all the configured
1626  *   event queues *nb_event_queues* which were previously supplied to
1627  *   rte_event_dev_configure() to the event port *port_id*.
1628  *
1629  * @param priorities
1630  *   Points to an array of *nb_links* service priorities associated with each
1631  *   event queue link to event port.
1632  *   The priority defines the event port's servicing priority for
1633  *   event queue, which may be ignored by an implementation.
1634  *   The requested priority should be in the range of
1635  *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
1636  *   The implementation shall normalize the requested priority to
1637  *   implementation supported priority value.
1638  *   NULL value is allowed, in which case this function links the event queues
1639  *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
1640  *
1641  * @param nb_links
1642  *   The number of links to establish. This parameter is ignored if queues is
1643  *   NULL.
1644  *
1645  * @return
1646  * The number of links actually established. The return value can be less than
1647  * the value of the *nb_links* parameter when the implementation has a
1648  * limitation on specific queue to port link establishment or if invalid
1649  * parameters are specified in *queues*.
1650  * If the return value is less than *nb_links*, the remaining links at the end
1651  * of queues[] are not established, and the caller has to take care of them.
1652  * If the return value is less than *nb_links* then the implementation shall
1653  * update rte_errno accordingly. Possible rte_errno values are:
1654  * (EDQUOT) Quota exceeded (the application tried to link a queue configured
1655  *  with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
1656  * (EINVAL) Invalid parameter
1657  */
1658 int
1659 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
1660 		    const uint8_t queues[], const uint8_t priorities[],
1661 		    uint16_t nb_links);
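
/*
 * A minimal sketch of linking two queues to a port with distinct service
 * priorities; an editorial example where the queue and port identifiers
 * are assumed to be valid for the configured device.
 *
 *	const uint8_t queues[] = { 0, 1 };
 *	const uint8_t prios[] = { RTE_EVENT_DEV_PRIORITY_HIGHEST,
 *				  RTE_EVENT_DEV_PRIORITY_NORMAL };
 *
 *	if (rte_event_port_link(dev_id, port_id, queues, prios,
 *				RTE_DIM(queues)) != RTE_DIM(queues))
 *		rte_panic("queue linking failed, rte_errno %d\n", rte_errno);
 */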
1662 
1663 /**
1664  * Unlink multiple source event queues supplied in *queues* from the destination
1665  * event port designated by its *port_id* on the event device designated
1666  * by its *dev_id*.
1667  *
1668  * The unlink call issues an async request to disable the event port *port_id*
1669  * from receiving events from the specified event queue(s) supplied in *queues*.
1670  * Event queue(s) to event port unlink establishment can be changed at runtime
1671  * without re-configuring the device.
1672  *
1673  * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
1674  * than or equal to one, this function unlinks the event queues from the default
1675  * profile identifier i.e. profile 0 of the event port.
1676  *
1677  * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
1678  *
1679  * @param dev_id
1680  *   The identifier of the device.
1681  *
1682  * @param port_id
1683  *   Event port identifier to select the destination port to unlink.
1684  *
1685  * @param queues
1686  *   Points to an array of *nb_unlinks* event queues to be unlinked
1687  *   from the event port.
1688  *   NULL value is allowed, in which case this function unlinks all the
1689  *   event queue(s) from the event port *port_id*.
1690  *
1691  * @param nb_unlinks
1692  *   The number of unlinks to establish. This parameter is ignored if queues is
1693  *   NULL.
1694  *
1695  * @return
1696  * The number of unlinks successfully requested. The return value can be less
1697  * than the value of the *nb_unlinks* parameter when the implementation has a
1698  * limitation on specific queue to port unlink establishment or
1699  * if invalid parameters are specified.
1700  * If the return value is less than *nb_unlinks*, the remaining queues at the
1701  * end of queues[] are not unlinked, and the caller has to take care of them.
1702  * If the return value is less than *nb_unlinks* then the implementation shall
1703  * update rte_errno accordingly. Possible rte_errno values are:
1704  * (EINVAL) Invalid parameter
1705  */
1706 int
1707 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
1708 		      uint8_t queues[], uint16_t nb_unlinks);
1709 
1710 /**
1711  * Link multiple source event queues supplied in *queues* to the destination
1712  * event port designated by its *port_id* with associated profile identifier
1713  * supplied in *profile_id* with service priorities supplied in *priorities*
1714  * on the event device designated by its *dev_id*.
1715  *
1716  * If *profile_id* is set to 0, the links created by the call
1717  * ``rte_event_port_link`` will be overwritten.
1718  *
1719  * Event ports by default use profile_id 0 unless it is changed using the
1720  * call ``rte_event_port_profile_switch()``.
1721  *
1722  * The link establishment shall enable the event port *port_id* to receive
1723  * events from the specified event queue(s) supplied in *queues*.
1724  *
1725  * An event queue may link to one or more event ports.
1726  * The number of links that can be established from an event queue to an event
1727  * port is implementation-defined.
1728  *
1729  * Event queue(s) to event port link establishment can be changed at runtime
1730  * without re-configuring the device to support scaling and to reduce the
1731  * latency of critical work by establishing the link with more event ports
1732  * at runtime.
1733  *
1734  * @param dev_id
1735  *   The identifier of the device.
1736  *
1737  * @param port_id
1738  *   Event port identifier to select the destination port to link.
1739  *
1740  * @param queues
1741  *   Points to an array of *nb_links* event queues to be linked
1742  *   to the event port.
1743  *   NULL value is allowed, in which case this function links all the configured
1744  *   event queues *nb_event_queues* which were previously supplied to
1745  *   rte_event_dev_configure() to the event port *port_id*.
1746  *
1747  * @param priorities
1748  *   Points to an array of *nb_links* service priorities associated with each
1749  *   event queue link to event port.
1750  *   The priority defines the event port's servicing priority for
1751  *   event queue, which may be ignored by an implementation.
1752  *   The requested priority should be in the range of
1753  *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
1754  *   The implementation shall normalize the requested priority to
1755  *   implementation supported priority value.
1756  *   NULL value is allowed, in which case this function links the event queues
1757  *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
1758  *
1759  * @param nb_links
1760  *   The number of links to establish. This parameter is ignored if queues is
1761  *   NULL.
1762  *
1763  * @param profile_id
1764  *   The profile identifier associated with the links between event queues and
1765  *   event port. Should be less than the max capability reported by
1766  *   ``rte_event_dev_info::max_profiles_per_port``
1767  *
1768  * @return
1769  * The number of links actually established. The return value can be less than
1770  * the value of the *nb_links* parameter when the implementation has a
1771  * limitation on specific queue to port link establishment or if invalid
1772  * parameters are specified in *queues*.
1773  * If the return value is less than *nb_links*, the remaining links at the end
1774  * of queues[] are not established, and the caller has to take care of them.
1775  * If the return value is less than *nb_links* then the implementation shall
1776  * update rte_errno accordingly. Possible rte_errno values are:
1777  * (EDQUOT) Quota exceeded (the application tried to link a queue configured
1778  *  with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
1779  * (EINVAL) Invalid parameter
1780  *
1781  */
1782 __rte_experimental
1783 int
1784 rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
1785 				 const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);
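
/*
 * A minimal sketch of configuring two link profiles on one port and
 * switching between them; an editorial example assuming the device reports
 * rte_event_dev_info::max_profiles_per_port >= 2.
 *
 *	const uint8_t q_hi = 0, q_lo = 1;
 *
 *	rte_event_port_profile_links_set(dev_id, port_id, &q_hi, NULL, 1, 0);
 *	rte_event_port_profile_links_set(dev_id, port_id, &q_lo, NULL, 1, 1);
 *
 *	// Later, from the thread polling this port:
 *	rte_event_port_profile_switch(dev_id, port_id, 1);
 *	// Subsequent dequeues on this port serve queue q_lo only.
 */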
1786 
1787 /**
1788  * Unlink multiple source event queues supplied in *queues* that belong to profile
1789  * designated by *profile_id* from the destination event port designated by its
1790  * *port_id* on the event device designated by its *dev_id*.
1791  *
1792  * If *profile_id* is set to 0, i.e. the default profile, then this function
1793  * will act as ``rte_event_port_unlink``.
1794  *
1795  * The unlink call issues an async request to disable the event port *port_id*
1796  * from receiving events from the specified event queue(s) supplied in *queues*.
1797  * Event queue(s) to event port unlink establishment can be changed at runtime
1798  * without re-configuring the device.
1799  *
1800  * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
1801  *
1802  * @param dev_id
1803  *   The identifier of the device.
1804  *
1805  * @param port_id
1806  *   Event port identifier to select the destination port to unlink.
1807  *
1808  * @param queues
1809  *   Points to an array of *nb_unlinks* event queues to be unlinked
1810  *   from the event port.
1811  *   NULL value is allowed, in which case this function unlinks all the
1812  *   event queue(s) from the event port *port_id*.
1813  *
1814  * @param nb_unlinks
1815  *   The number of unlinks to establish. This parameter is ignored if queues is
1816  *   NULL.
1817  *
1818  * @param profile_id
1819  *   The profile identifier associated with the links between event queues and
1820  *   event port. Should be less than the max capability reported by
1821  *   ``rte_event_dev_info::max_profiles_per_port``
1822  *
1823  * @return
1824  * The number of unlinks successfully requested. The return value can be less
1825  * than the value of the *nb_unlinks* parameter when the implementation has a
1826  * limitation on specific queue to port unlink establishment or
1827  * if invalid parameters are specified.
1828  * If the return value is less than *nb_unlinks*, the remaining queues at the
1829  * end of queues[] are not unlinked, and the caller has to take care of them.
1830  * If the return value is less than *nb_unlinks* then the implementation shall
1831  * update rte_errno accordingly. Possible rte_errno values are:
1832  * (EINVAL) Invalid parameter
1833  *
1834  */
1835 __rte_experimental
1836 int
1837 rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
1838 			      uint16_t nb_unlinks, uint8_t profile_id);
1839 
1840 /**
1841  * Returns the number of unlinks in progress.
1842  *
1843  * This function provides the application with a method to detect when an
1844  * unlink has been completed by the implementation.
1845  *
1846  * @see rte_event_port_unlink() to issue unlink requests.
1847  *
1848  * @param dev_id
1849  *   The identifier of the device.
1850  *
1851  * @param port_id
1852  *   Event port identifier to select port to check for unlinks in progress.
1853  *
1854  * @return
1855  * The number of unlinks that are in progress. A return of zero indicates that
1856  * there are no outstanding unlink requests. A positive return value indicates
1857  * the number of unlinks that are in progress, but are not yet complete.
1858  * A negative return value indicates an error, -EINVAL indicates an invalid
1859  * parameter passed for *dev_id* or *port_id*.
1860  */
1861 int
1862 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);
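
/*
 * A minimal sketch of requesting an unlink and waiting for the async
 * request to complete; an editorial example, a production loop would
 * bound the wait.
 *
 *	uint8_t q = queue_id;
 *
 *	if (rte_event_port_unlink(dev_id, port_id, &q, 1) != 1)
 *		rte_panic("unlink request failed, rte_errno %d\n", rte_errno);
 *	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *		rte_pause();
 */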
1863 
1864 /**
1865  * Retrieve the list of source event queues and their associated service priorities
1866  * linked to the destination event port designated by its *port_id*
1867  * on the event device designated by its *dev_id*.
1868  *
1869  * @param dev_id
1870  *   The identifier of the device.
1871  *
1872  * @param port_id
1873  *   Event port identifier.
1874  *
1875  * @param[out] queues
1876  *   Points to an array of *queues* for output.
1877  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1878  *   store the event queue(s) linked with event port *port_id*
1879  *
1880  * @param[out] priorities
1881  *   Points to an array of *priorities* for output.
1882  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1883  *   store the service priority associated with each event queue linked
1884  *
1885  * @return
1886  * The number of links established on the event port designated by its
1887  *  *port_id*.
1888  * - <0 on failure.
1889  */
1890 int
1891 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1892 			 uint8_t queues[], uint8_t priorities[]);
1893 
1894 /**
1895  * Retrieve the list of source event queues and their service priorities
1896  * associated to a *profile_id* and linked to the destination event port
1897  * designated by its *port_id* on the event device designated by its *dev_id*.
1898  *
1899  * @param dev_id
1900  *   The identifier of the device.
1901  *
1902  * @param port_id
1903  *   Event port identifier.
1904  *
1905  * @param[out] queues
1906  *   Points to an array of *queues* for output.
1907  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1908  *   store the event queue(s) linked with event port *port_id*
1909  *
1910  * @param[out] priorities
1911  *   Points to an array of *priorities* for output.
1912  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1913  *   store the service priority associated with each event queue linked
1914  *
1915  * @param profile_id
1916  *   The profile identifier associated with the links between event queues and
1917  *   event port. Should be less than the max capability reported by
1918  *   ``rte_event_dev_info::max_profiles_per_port``
1919  *
1920  * @return
1921  * The number of links established on the event port designated by its
1922  *  *port_id*.
1923  * - <0 on failure.
1924  */
1925 __rte_experimental
1926 int
1927 rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
1928 				 uint8_t priorities[], uint8_t profile_id);
1929 
1930 /**
1931  * Retrieve the service ID of the event dev. If the event dev doesn't use
1932  * an rte_service function, this function returns -ESRCH.
1933  *
1934  * @param dev_id
1935  *   The identifier of the device.
1936  *
1937  * @param [out] service_id
1938  *   A pointer to a uint32_t, to be filled in with the service id.
1939  *
1940  * @return
1941  *   - 0: Success
1942  *   - <0: Error code on failure, if the event dev doesn't use a rte_service
1943  *   function, this function returns -ESRCH.
1944  */
1945 int
1946 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
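
/*
 * A minimal sketch of running a software scheduler via the service core
 * framework; an editorial example assuming lcore 1 was added with
 * rte_service_lcore_add() beforehand.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, 1, 1);
 *		rte_service_runstate_set(service_id, 1);
 *	}
 */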
1947 
1948 /**
1949  * Dump internal information about *dev_id* to the FILE* provided in *f*.
1950  *
1951  * @param dev_id
1952  *   The identifier of the device.
1953  *
1954  * @param f
1955  *   A pointer to a file for output
1956  *
1957  * @return
1958  *   - 0: on success
1959  *   - <0: on failure.
1960  */
1961 int
1962 rte_event_dev_dump(uint8_t dev_id, FILE *f);
1963 
1964 /** Maximum name length for extended statistics counters */
1965 #define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
1966 
1967 /**
1968  * Selects the component of the eventdev to retrieve statistics from.
1969  */
1970 enum rte_event_dev_xstats_mode {
1971 	RTE_EVENT_DEV_XSTATS_DEVICE,
1972 	RTE_EVENT_DEV_XSTATS_PORT,
1973 	RTE_EVENT_DEV_XSTATS_QUEUE,
1974 };
1975 
1976 /**
1977  * A name-key lookup element for extended statistics.
1978  *
1979  * This structure is used to map between names and ID numbers
1980  * for extended eventdev statistics.
1981  */
1982 struct rte_event_dev_xstats_name {
1983 	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
1984 };
1985 
1986 /**
1987  * Retrieve names of extended statistics of an event device.
1988  *
1989  * @param dev_id
1990  *   The identifier of the event device.
1991  * @param mode
1992  *   The mode of statistics to retrieve. Choices include the device statistics,
1993  *   port statistics or queue statistics.
1994  * @param queue_port_id
1995  *   Used to specify the port or queue number in queue or port mode, and is
1996  *   ignored in device mode.
1997  * @param[out] xstats_names
1998  *   Block of memory to insert names into. Must hold at least *size* entries.
1999  *   If set to NULL, the function returns the required capacity.
2000  * @param[out] ids
2001  *   Block of memory to insert ids into. Must hold at least *size* entries.
2002  *   If set to NULL, the function returns the required capacity. The id values
2003  *   returned can be passed to *rte_event_dev_xstats_get* to select statistics.
2004  * @param size
2005  *   Capacity of xstats_names (number of names).
2006  * @return
2007  *   - positive value lower or equal to size: success. The return value
2008  *     is the number of entries filled in the stats table.
2009  *   - positive value higher than size: error, the given statistics table
2010  *     is too small. The return value corresponds to the size that should
2011  *     be given to succeed. The entries in the table are not valid and
2012  *     shall not be used by the caller.
2013  *   - negative value on error:
2014  *        -ENODEV for invalid *dev_id*
2015  *        -EINVAL for invalid mode, queue port or id parameters
2016  *        -ENOTSUP if the device doesn't support this function.
2017  */
2018 int
2019 rte_event_dev_xstats_names_get(uint8_t dev_id,
2020 			       enum rte_event_dev_xstats_mode mode,
2021 			       uint8_t queue_port_id,
2022 			       struct rte_event_dev_xstats_name *xstats_names,
2023 			       uint64_t *ids,
2024 			       unsigned int size);
2025 
2026 /**
2027  * Retrieve extended statistics of an event device.
2028  *
2029  * @param dev_id
2030  *   The identifier of the device.
2031  * @param mode
2032  *  The mode of statistics to retrieve. Choices include the device statistics,
2033  *  port statistics or queue statistics.
2034  * @param queue_port_id
2035  *   Used to specify the port or queue number in queue or port mode, and is
2036  *   ignored in device mode.
2037  * @param ids
2038  *   The id numbers of the stats to get. The ids can be obtained from the stat
2039  *   position in the stat list from rte_event_dev_xstats_names_get(), or
2040  *   by using rte_event_dev_xstats_by_name_get().
2041  * @param[out] values
2042  *   The values for each stats request by ID.
2043  * @param n
2044  *   The number of stats requested
2045  * @return
2046  *   - positive value: number of stat entries filled into the values array
2047  *   - negative value on error:
2048  *        -ENODEV for invalid *dev_id*
2049  *        -EINVAL for invalid mode, queue port or id parameters
2050  *        -ENOTSUP if the device doesn't support this function.
2051  */
2052 int
2053 rte_event_dev_xstats_get(uint8_t dev_id,
2054 			 enum rte_event_dev_xstats_mode mode,
2055 			 uint8_t queue_port_id,
2056 			 const uint64_t ids[],
2057 			 uint64_t values[], unsigned int n);
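
/*
 * A minimal sketch of enumerating and reading all device-level xstats;
 * an editorial example with allocation failures left unhandled.
 *
 *	int n = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *	struct rte_event_dev_xstats_name *names = malloc(n * sizeof(*names));
 *	uint64_t *ids = malloc(n * sizeof(*ids));
 *	uint64_t *vals = malloc(n * sizeof(*vals));
 *
 *	rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
 *				       0, names, ids, n);
 *	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				 ids, vals, n);
 */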
2058 
2059 /**
2060  * Retrieve the value of a single stat by requesting it by name.
2061  *
2062  * @param dev_id
2063  *   The identifier of the device
2064  * @param name
2065  *   The stat name to retrieve
2066  * @param[out] id
2067  *   If non-NULL, the numerical id of the stat will be returned, so that further
2068  *   requests for the stat can be made using rte_event_dev_xstats_get(), which
2069  *   will be faster as it doesn't need to scan a list of names for the stat.
2070  *   If the stat cannot be found, the id returned will be (unsigned)-1.
2071  * @return
2072  *   - positive value or zero: the stat value
2073  *   - negative value: -EINVAL if stat not found, -ENOTSUP if not supported.
2074  */
2075 uint64_t
2076 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
2077 				 uint64_t *id);
2078 
2079 /**
2080  * Reset the values of the xstats of the selected component in the device.
2081  *
2082  * @param dev_id
2083  *   The identifier of the device
2084  * @param mode
2085  *   The mode of the statistics to reset. Choose from device, queue or port.
2086  * @param queue_port_id
2087  *   The queue or port to reset. 0 and positive values select ports and queues,
2088  *   while -1 indicates all ports or queues.
2089  * @param ids
2090  *   Selects specific statistics to be reset. When NULL, all statistics selected
2091  *   by *mode* will be reset. If non-NULL, must point to array of at least
2092  *   *nb_ids* size.
2093  * @param nb_ids
2094  *   The number of ids available from the *ids* array. Ignored when ids is NULL.
2095  * @return
2096  *   - zero: successfully reset the statistics to zero
2097  *   - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported.
2098  */
2099 int
2100 rte_event_dev_xstats_reset(uint8_t dev_id,
2101 			   enum rte_event_dev_xstats_mode mode,
2102 			   int16_t queue_port_id,
2103 			   const uint64_t ids[],
2104 			   uint32_t nb_ids);
2105 
2106 /**
2107  * Trigger the eventdev self test.
2108  *
2109  * @param dev_id
2110  *   The identifier of the device
2111  * @return
2112  *   - 0: Selftest successful
2113  *   - -ENOTSUP if the device doesn't support selftest
2114  *   - other values < 0 on failure.
2115  */
2116 int rte_event_dev_selftest(uint8_t dev_id);
2117 
2118 /**
2119  * Create a mempool that holds event vectors, with the memory required per
2120  * event vector sized according to the number of elements per vector.
2122  *
2123  * @param name
2124  *   The name of the vector pool.
2125  * @param n
2126  *   The number of elements in the event vector pool.
2127  * @param cache_size
2128  *   Size of the per-core object cache. See rte_mempool_create() for
2129  *   details.
2130  * @param nb_elem
2131  *   The number of elements that a single event vector should be able to hold.
2132  * @param socket_id
2133  *   The socket identifier where the memory should be allocated. The
2134  *   value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
2135  *   reserved zone
2136  *
2137  * @return
2138  *   The pointer to the newly allocated mempool, on success. NULL on error
2139  *   with rte_errno set appropriately. Possible rte_errno values include:
2140  *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
2141  *    - E_RTE_SECONDARY - function was called from a secondary process instance
2142  *    - EINVAL - cache size provided is too large, or priv_size is not aligned.
2143  *    - ENOSPC - the maximum number of memzones has already been allocated
2144  *    - EEXIST - a memzone with the same name already exists
2145  *    - ENOMEM - no appropriate memory area found in which to create memzone
2146  *    - ENAMETOOLONG - mempool name requested is too long.
2147  */
2148 struct rte_mempool *
2149 rte_event_vector_pool_create(const char *name, unsigned int n,
2150 			     unsigned int cache_size, uint16_t nb_elem,
2151 			     int socket_id);
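
/*
 * A minimal sketch of creating a pool of event vectors, each able to hold
 * up to 64 elements; an editorial example, the pool size and element count
 * are illustrative only.
 *
 *	struct rte_mempool *vpool;
 *
 *	vpool = rte_event_vector_pool_create("ev_vec_pool", 16383, 0, 64,
 *					     rte_socket_id());
 *	if (vpool == NULL)
 *		rte_panic("vector pool creation failed, rte_errno %d\n",
 *			  rte_errno);
 */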
2152 
2153 #include <rte_eventdev_core.h>
2154 
2155 static __rte_always_inline uint16_t
2156 __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2157 			  const struct rte_event ev[], uint16_t nb_events,
2158 			  const event_enqueue_burst_t fn)
2159 {
2160 	const struct rte_event_fp_ops *fp_ops;
2161 	void *port;
2162 
2163 	fp_ops = &rte_event_fp_ops[dev_id];
2164 	port = fp_ops->data[port_id];
2165 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2166 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2167 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2168 		rte_errno = EINVAL;
2169 		return 0;
2170 	}
2171 
2172 	if (port == NULL) {
2173 		rte_errno = EINVAL;
2174 		return 0;
2175 	}
2176 #endif
2177 	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
2178 	/*
2179 	 * Allow zero-cost non-burst mode routine invocation if the application
2180 	 * requests nb_events as a compile-time constant of one.
2181 	 */
2182 	if (nb_events == 1)
2183 		return (fp_ops->enqueue)(port, ev);
2184 	else
2185 		return fn(port, ev, nb_events);
2186 }
2187 
2188 /**
2189  * Enqueue a burst of event objects or an event object supplied in *rte_event*
2190  * structure on an event device designated by its *dev_id* through the event
2191  * port specified by *port_id*. Each event object specifies the event queue on
2192  * which it will be enqueued.
2193  *
2194  * The *nb_events* parameter is the number of event objects to enqueue which are
2195  * supplied in the *ev* array of *rte_event* structure.
2196  *
2197  * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
2198  * enqueued to the same port that their associated events were dequeued from.
2199  *
2200  * The rte_event_enqueue_burst() function returns the number of
2201  * event objects it actually enqueued. A return value equal to *nb_events*
2202  * means that all event objects have been enqueued.
2203  *
2204  * @param dev_id
2205  *   The identifier of the device.
2206  * @param port_id
2207  *   The identifier of the event port.
2208  * @param ev
2209  *   Points to an array of *nb_events* objects of type *rte_event* structure
2210  *   which contain the event object enqueue operations to be processed.
2211  * @param nb_events
2212  *   The number of event objects to enqueue, typically number of
2213  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2214  *   available for this port.
2215  *
2216  * @return
2217  *   The number of event objects actually enqueued on the event device. The
2218  *   return value can be less than the value of the *nb_events* parameter when
2219  *   the event devices queue is full or if invalid parameters are specified in a
2220  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2221  *   events at the end of ev[] are not consumed and the caller has to take care
2222  *   of them, and rte_errno is set accordingly. Possible errno values include:
2223  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2224  *              ID is invalid, or an event's sched type doesn't match the
2225  *              capabilities of the destination queue.
2226  *   - ENOSPC   The event port was backpressured and unable to enqueue
2227  *              one or more events. This error code is only applicable to
2228  *              closed systems.
2229  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2230  */
2231 static inline uint16_t
2232 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2233 			const struct rte_event ev[], uint16_t nb_events)
2234 {
2235 	const struct rte_event_fp_ops *fp_ops;
2236 
2237 	fp_ops = &rte_event_fp_ops[dev_id];
2238 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2239 					 fp_ops->enqueue_burst);
2240 }
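
/*
 * A minimal sketch of a producer that retries on backpressure; an
 * editorial example where nb_evs prepared events are assumed in evs[].
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < nb_evs) {
 *		uint16_t n = rte_event_enqueue_burst(dev_id, port_id,
 *						     evs + sent,
 *						     nb_evs - sent);
 *		if (n < nb_evs - sent && rte_errno != ENOSPC)
 *			break; // Parameter error; do not spin forever.
 *		sent += n;
 *	}
 */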
2241 
2242 /**
2243  * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_NEW* on
2244  * an event device designated by its *dev_id* through the event port specified
2245  * by *port_id*.
2246  *
2247  * Provides the same functionality as rte_event_enqueue_burst(), except that
2248  * the application can use this API when all objects in the burst contain
2249  * the enqueue operation of type *RTE_EVENT_OP_NEW*. This specialized
2250  * function can provide an additional hint to the PMD and optimize if possible.
2251  *
2252  * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
2253  * has an event object of operation type != RTE_EVENT_OP_NEW.
2254  *
2255  * @param dev_id
2256  *   The identifier of the device.
2257  * @param port_id
2258  *   The identifier of the event port.
2259  * @param ev
2260  *   Points to an array of *nb_events* objects of type *rte_event* structure
2261  *   which contain the event object enqueue operations to be processed.
2262  * @param nb_events
2263  *   The number of event objects to enqueue, typically number of
2264  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2265  *   available for this port.
2266  *
2267  * @return
2268  *   The number of event objects actually enqueued on the event device. The
2269  *   return value can be less than the value of the *nb_events* parameter when
2270  *   the event devices queue is full or if invalid parameters are specified in a
2271  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2272  *   events at the end of ev[] are not consumed and the caller has to take care
2273  *   of them, and rte_errno is set accordingly. Possible errno values include:
2274  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2275  *              ID is invalid, or an event's sched type doesn't match the
2276  *              capabilities of the destination queue.
2277  *   - ENOSPC   The event port was backpressured and unable to enqueue
2278  *              one or more events. This error code is only applicable to
2279  *              closed systems.
2280  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2281  * @see rte_event_enqueue_burst()
2282  */
2283 static inline uint16_t
2284 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
2285 			    const struct rte_event ev[], uint16_t nb_events)
2286 {
2287 	const struct rte_event_fp_ops *fp_ops;
2288 
2289 	fp_ops = &rte_event_fp_ops[dev_id];
2290 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2291 					 fp_ops->enqueue_new_burst);
2292 }
2293 
2294 /**
2295  * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_FORWARD*
2296  * on an event device designated by its *dev_id* through the event port
2297  * specified by *port_id*.
2298  *
2299  * Provides the same functionality as rte_event_enqueue_burst(), except that
2300  * the application can use this API when all objects in the burst contain
2301  * the enqueue operation of type *RTE_EVENT_OP_FORWARD*. This specialized
2302  * function can provide an additional hint to the PMD and optimize if possible.
2303  *
2304  * The rte_event_enqueue_forward_burst() result is undefined if the enqueue
2305  * burst has an event object of operation type != RTE_EVENT_OP_FORWARD.
2306  *
2307  * @param dev_id
2308  *   The identifier of the device.
2309  * @param port_id
2310  *   The identifier of the event port.
2311  * @param ev
2312  *   Points to an array of *nb_events* objects of type *rte_event* structure
2313  *   which contain the event object enqueue operations to be processed.
2314  * @param nb_events
2315  *   The number of event objects to enqueue, typically number of
2316  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2317  *   available for this port.
2318  *
2319  * @return
2320  *   The number of event objects actually enqueued on the event device. The
2321  *   return value can be less than the value of the *nb_events* parameter when
2322  *   the event devices queue is full or if invalid parameters are specified in a
2323  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2324  *   events at the end of ev[] are not consumed and the caller has to take care
2325  *   of them, and rte_errno is set accordingly. Possible errno values include:
2326  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2327  *              ID is invalid, or an event's sched type doesn't match the
2328  *              capabilities of the destination queue.
2329  *   - ENOSPC   The event port was backpressured and unable to enqueue
2330  *              one or more events. This error code is only applicable to
2331  *              closed systems.
2332  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2333  * @see rte_event_enqueue_burst()
2334  */
2335 static inline uint16_t
2336 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
2337 				const struct rte_event ev[], uint16_t nb_events)
2338 {
2339 	const struct rte_event_fp_ops *fp_ops;
2340 
2341 	fp_ops = &rte_event_fp_ops[dev_id];
2342 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2343 					 fp_ops->enqueue_forward_burst);
2344 }
2345 
2346 /**
2347  * Dequeue a burst of events objects or an event object from the event port
2348  * designated by its *event_port_id*, on an event device designated
2349  * by its *dev_id*.
2350  *
2351  * rte_event_dequeue_burst() does not dictate the specifics of the scheduling
2352  * algorithm, as each eventdev driver may have different criteria for
2353  * scheduling an event. However, in general, from an application perspective
2354  * the scheduler may use the following scheme to dispatch an event to the port.
2355  *
2356  * 1) Selection of event queue based on
2357  *   a) The list of event queues linked to the event port.
2358  *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
2359  *   queue selection from list is based on event queue priority relative to
2360  *   other event queue supplied as *priority* in rte_event_queue_setup()
2361  *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
2362  *   queue selection from the list is based on event priority supplied as
2363  *   *priority* in rte_event_enqueue_burst()
2364  * 2) Selection of event
2365  *   a) The number of flows available in selected event queue.
2366  *   b) Schedule type method associated with the event
2367  *
2368  * The *nb_events* parameter is the maximum number of event objects to dequeue
2369  * which are returned in the *ev* array of *rte_event* structure.
2370  *
2371  * The rte_event_dequeue_burst() function returns the number of event objects
2372  * it actually dequeued. A return value equal to *nb_events* means that all
2373  * event objects have been dequeued.
2374  *
2375  * The number of events dequeued is the number of scheduler contexts held by
2376  * this port. These contexts are automatically released in the next
2377  * rte_event_dequeue_burst() invocation if the port supports implicit
2378  * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
2379  * operation can be used to release the contexts early.
2380  *
2381  * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
2382  * enqueued to the same port that their associated events were dequeued from.
2383  *
2384  * @param dev_id
2385  *   The identifier of the device.
2386  * @param port_id
2387  *   The identifier of the event port.
2388  * @param[out] ev
2389  *   Points to an array of *nb_events* objects of type *rte_event* structure
2390  *   for output to be populated with the dequeued event objects.
2391  * @param nb_events
2392  *   The maximum number of event objects to dequeue, typically number of
2393  *   rte_event_port_dequeue_depth() available for this port.
2394  *
2395  * @param timeout_ticks
2396  *   - 0 no-wait, returns immediately if there is no event.
2397  *   - >0 wait for the event. If the device is configured with
2398  *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
2399  *   at least one event is available or *timeout_ticks* time has elapsed.
2400  *   If the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
2401  *   then this function will wait until an event is available or the
2402  *   *dequeue_timeout_ns* ns which was previously supplied to
2403  *   rte_event_dev_configure() has elapsed.
2404  *
2405  * @return
2406  * The number of event objects actually dequeued from the port. The return
2407  * value can be less than the value of the *nb_events* parameter when fewer
2408  * than *nb_events* events are available on the event port.
2409  *
2410  * @see rte_event_port_dequeue_depth()
2411  */
2412 static inline uint16_t
2413 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
2414 			uint16_t nb_events, uint64_t timeout_ticks)
2415 {
2416 	const struct rte_event_fp_ops *fp_ops;
2417 	void *port;
2418 
2419 	fp_ops = &rte_event_fp_ops[dev_id];
2420 	port = fp_ops->data[port_id];
2421 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2422 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2423 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2424 		rte_errno = EINVAL;
2425 		return 0;
2426 	}
2427 
2428 	if (port == NULL) {
2429 		rte_errno = EINVAL;
2430 		return 0;
2431 	}
2432 #endif
2433 	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
2434 	/*
2435 	 * Allow zero-cost non-burst mode routine invocation if the application
2436 	 * requests nb_events as a compile-time constant of one.
2437 	 */
2438 	if (nb_events == 1)
2439 		return (fp_ops->dequeue)(port, ev, timeout_ticks);
2440 	else
2441 		return (fp_ops->dequeue_burst)(port, ev, nb_events,
2442 					       timeout_ticks);
2443 }
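
/*
 * A minimal sketch of a burst-oriented worker loop; an editorial example
 * where BURST_SIZE, timeout, done and process_event() are assumed to be
 * application-defined.
 *
 *	struct rte_event evs[BURST_SIZE];
 *
 *	while (!done) {
 *		uint16_t i, nb;
 *
 *		nb = rte_event_dequeue_burst(dev_id, port_id, evs,
 *					     BURST_SIZE, timeout);
 *		for (i = 0; i < nb; i++)
 *			process_event(&evs[i]);
 *	}
 */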
2444 
2445 #define RTE_EVENT_DEV_MAINT_OP_FLUSH          (1 << 0)
2446 /**< Force an immediate flush of any buffered events in the port,
2447  * potentially at the cost of additional overhead.
2448  *
2449  * @see rte_event_maintain()
2450  */
2451 
2452 /**
2453  * Maintain an event device.
2454  *
2455  * This function is only relevant for event devices which do not have
2456  * the @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE flag set. Such devices
2457  * require an application thread using a particular port to
2458  * periodically call rte_event_maintain() on that port during periods
2459  * which it is neither attempting to enqueue events to nor dequeue
2460  * events from the port. rte_event_maintain() is a low-overhead
2461  * function and should be called at a high rate (e.g., in the
2462  * application's poll loop).
2463  *
2464  * No port may be left unmaintained.
2465  *
2466  * At the application thread's convenience, rte_event_maintain() may
2467  * (but is not required to) be called even during periods when enqueue
2468  * or dequeue functions are being called, at the cost of a slight
2469  * increase in overhead.
2470  *
2471  * rte_event_maintain() may be called on event devices which have set
2472  * @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, in which case it is a
2473  * no-operation.
2474  *
2475  * @param dev_id
2476  *   The identifier of the device.
2477  * @param port_id
2478  *   The identifier of the event port.
2479  * @param op
2480  *   0, or @ref RTE_EVENT_DEV_MAINT_OP_FLUSH.
2481  * @return
2482  *  - 0 on success.
2483  *  - -EINVAL if *dev_id*,  *port_id*, or *op* is invalid.
2484  *
2485  * @see RTE_EVENT_DEV_CAP_MAINTENANCE_FREE
2486  */
2487 static inline int
2488 rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
2489 {
2490 	const struct rte_event_fp_ops *fp_ops;
2491 	void *port;
2492 
2493 	fp_ops = &rte_event_fp_ops[dev_id];
2494 	port = fp_ops->data[port_id];
2495 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2496 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2497 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
2498 		return -EINVAL;
2499 
2500 	if (port == NULL)
2501 		return -EINVAL;
2502 
2503 	if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
2504 		return -EINVAL;
2505 #endif
2506 	rte_eventdev_trace_maintain(dev_id, port_id, op);
2507 
2508 	if (fp_ops->maintain != NULL)
2509 		fp_ops->maintain(port, op);
2510 
2511 	return 0;
2512 }
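
/*
 * A minimal sketch of keeping a port maintained while the worker is
 * neither enqueuing nor dequeuing; an editorial example where
 * work_pending() is an assumed application helper, relevant only for
 * devices without RTE_EVENT_DEV_CAP_MAINTENANCE_FREE.
 *
 *	while (!work_pending())
 *		rte_event_maintain(dev_id, port_id, 0);
 */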
2513 
2514 /**
2515  * Change the active profile on an event port.
2516  *
2517  * This function is used to change the current active profile on an event port
2518  * when multiple link profiles are configured on an event port through the
2519  * function call ``rte_event_port_profile_links_set``.
2520  *
2521  * On the subsequent ``rte_event_dequeue_burst`` call, only the event queues
2522  * that were associated with the newly active profile will participate in
2523  * scheduling.
2524  *
2525  * @param dev_id
2526  *   The identifier of the device.
2527  * @param port_id
2528  *   The identifier of the event port.
2529  * @param profile_id
2530  *   The identifier of the profile.
2531  * @return
2532  *  - 0 on success.
2533  *  - -EINVAL if *dev_id*,  *port_id*, or *profile_id* is invalid.
2534  */
2535 static inline int
2536 rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
2537 {
2538 	const struct rte_event_fp_ops *fp_ops;
2539 	void *port;
2540 
2541 	fp_ops = &rte_event_fp_ops[dev_id];
2542 	port = fp_ops->data[port_id];
2543 
2544 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2545 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2546 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
2547 		return -EINVAL;
2548 
2549 	if (port == NULL)
2550 		return -EINVAL;
2551 
2552 	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
2553 		return -EINVAL;
2554 #endif
2555 	rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);
2556 
2557 	return fp_ops->profile_switch(port, profile_id);
2558 }
2559 
2560 #ifdef __cplusplus
2561 }
2562 #endif
2563 
2564 #endif /* _RTE_EVENTDEV_H_ */
2565