1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc.
3  * Copyright(c) 2016-2018 Intel Corporation.
4  * Copyright 2016 NXP
5  * All rights reserved.
6  */
7 
8 #ifndef _RTE_EVENTDEV_H_
9 #define _RTE_EVENTDEV_H_
10 
11 /**
12  * @file
13  *
14  * RTE Event Device API
15  * ====================
16  *
17  * In a traditional DPDK application model, the application polls Ethdev port RX
18  * queues to look for work, and processing is done in a run-to-completion manner,
 * after which the packets are transmitted on an Ethdev TX queue. Load is
20  * distributed by statically assigning ports and queues to lcores, and NIC
21  * receive-side scaling (RSS), or similar, is employed to distribute network flows
22  * (and thus work) on the same port across multiple RX queues.
23  *
24  * In contrast, in an event-driven model, as supported by this "eventdev" library,
25  * incoming packets (or other input events) are fed into an event device, which
26  * schedules those packets across the available lcores, in accordance with its configuration.
27  * This event-driven programming model offers applications automatic multicore scaling,
28  * dynamic load balancing, pipelining, packet order maintenance, synchronization,
29  * and prioritization/quality of service.
30  *
31  * The Event Device API is composed of two parts:
32  *
 * - The application-oriented Event API that includes functions to set up
 *   an event device (configure it, set up its queues and ports, and start it), to
35  *   establish the links between queues and ports to receive events, and so on.
36  *
37  * - The driver-oriented Event API that exports a function allowing
 *   an event Poll Mode Driver (PMD) to register itself as
39  *   an event device driver.
40  *
41  * Application-oriented Event API
42  * ------------------------------
43  *
44  * Event device components:
45  *
46  *                     +-----------------+
47  *                     | +-------------+ |
48  *        +-------+    | |    flow 0   | |
49  *        |Packet |    | +-------------+ |
50  *        |event  |    | +-------------+ |
51  *        |       |    | |    flow 1   | |port_link(port0, queue0)
52  *        +-------+    | +-------------+ |     |     +--------+
53  *        +-------+    | +-------------+ o-----v-----o        |dequeue +------+
54  *        |Crypto |    | |    flow n   | |           | event  +------->|Core 0|
55  *        |work   |    | +-------------+ o----+      | port 0 |        |      |
56  *        |done ev|    |  event queue 0  |    |      +--------+        +------+
57  *        +-------+    +-----------------+    |
58  *        +-------+                           |
59  *        |Timer  |    +-----------------+    |      +--------+
60  *        |expiry |    | +-------------+ |    +------o        |dequeue +------+
61  *        |event  |    | |    flow 0   | o-----------o event  +------->|Core 1|
62  *        +-------+    | +-------------+ |      +----o port 1 |        |      |
63  *       Event enqueue | +-------------+ |      |    +--------+        +------+
64  *     o-------------> | |    flow 1   | |      |
65  *        enqueue(     | +-------------+ |      |
66  *        queue_id,    |                 |      |    +--------+        +------+
67  *        flow_id,     | +-------------+ |      |    |        |dequeue |Core 2|
68  *        sched_type,  | |    flow n   | o-----------o event  +------->|      |
69  *        event_type,  | +-------------+ |      |    | port 2 |        +------+
70  *        subev_type,  |  event queue 1  |      |    +--------+
71  *        event)       +-----------------+      |    +--------+
72  *                                              |    |        |dequeue +------+
73  *        +-------+    +-----------------+      |    | event  +------->|Core n|
74  *        |Core   |    | +-------------+ o-----------o port n |        |      |
75  *        |(SW)   |    | |    flow 0   | |      |    +--------+        +--+---+
76  *        |event  |    | +-------------+ |      |                         |
77  *        +-------+    | +-------------+ |      |                         |
78  *            ^        | |    flow 1   | |      |                         |
79  *            |        | +-------------+ o------+                         |
80  *            |        | +-------------+ |                                |
81  *            |        | |    flow n   | |                                |
82  *            |        | +-------------+ |                                |
83  *            |        |  event queue n  |                                |
84  *            |        +-----------------+                                |
85  *            |                                                           |
86  *            +-----------------------------------------------------------+
87  *
88  * **Event device**: A hardware or software-based event scheduler.
89  *
90  * **Event**: Represents an item of work and is the smallest unit of scheduling.
91  * An event carries metadata, such as queue ID, scheduling type, and event priority,
92  * and data such as one or more packets or other kinds of buffers.
93  * Some examples of events are:
 * - a software-generated item of work originating from an lcore,
95  *   perhaps carrying a packet to be processed.
96  * - a crypto work completion notification.
97  * - a timer expiry notification.
98  *
99  * **Event queue**: A queue containing events that are to be scheduled by the event device.
100  * An event queue contains events of different flows associated with scheduling
101  * types, such as atomic, ordered, or parallel.
102  * Each event given to an event device must have a valid event queue id field in the metadata,
103  * to specify on which event queue in the device the event must be placed,
104  * for later scheduling.
105  *
106  * **Event port**: An application's interface into the event dev for enqueue and
107  * dequeue operations. Each event port can be linked with one or more
108  * event queues for dequeue operations.
 * Enqueue and dequeue from a port are not thread-safe, and the expected use-case is
110  * that each port is polled by only a single lcore. [If this is not the case,
111  * a suitable synchronization mechanism should be used to prevent simultaneous
112  * access from multiple lcores.]
113  * To schedule events to an lcore, the event device will schedule them to the event port(s)
114  * being polled by that lcore.
115  *
116  * *NOTE*: By default, all the functions of the Event Device API exported by a PMD
117  * are non-thread-safe functions, which must not be invoked on the same object in parallel on
118  * different logical cores.
 * For instance, the dequeue function of a PMD cannot be invoked in parallel on two logical
 * cores to operate on the same event port. Of course, this function
121  * can be invoked in parallel by different logical cores on different ports.
122  * It is the responsibility of the upper level application to enforce this rule.
123  *
124  * In all functions of the Event API, the Event device is
 * designated by an integer >= 0 named the device identifier *dev_id*.
126  *
127  * The functions exported by the application Event API to setup a device
128  * must be invoked in the following order:
129  *     - rte_event_dev_configure()
130  *     - rte_event_queue_setup()
131  *     - rte_event_port_setup()
132  *     - rte_event_port_link()
133  *     - rte_event_dev_start()
134  *
135  * Then, the application can invoke, in any order, the functions
136  * exported by the Event API to dequeue events, enqueue events,
137  * and link and unlink event queue(s) to event ports.
138  *
139  * Before configuring a device, an application should call rte_event_dev_info_get()
140  * to determine the capabilities of the event device, and any queue or port
141  * limits of that device. The parameters set in the various device configuration
142  * structures may need to be adjusted based on the max values provided in the
143  * device information structure returned from the rte_event_dev_info_get() API.
144  * An application may use rte_event_queue_default_conf_get() or
145  * rte_event_port_default_conf_get() to get the default configuration
 * to set up an event queue or event port by overriding a few default values.
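 *
 * As an illustrative sketch of this sequence (error handling omitted; the single
 * queue and port, and the values copied straight from the device info, are
 * assumptions an application would normally tune):
 *
 * \code{.c}
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config config = {0};
 *	struct rte_event_queue_conf queue_conf;
 *	struct rte_event_port_conf port_conf;
 *	uint8_t dev_id = 0, queue_id = 0, port_id = 0;
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	config.nb_event_queues = 1;
 *	config.nb_event_ports = 1;
 *	config.nb_events_limit = info.max_num_events;
 *	config.nb_event_queue_flows = info.max_event_queue_flows;
 *	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	config.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	rte_event_dev_configure(dev_id, &config);
 *
 *	rte_event_queue_default_conf_get(dev_id, queue_id, &queue_conf);
 *	rte_event_queue_setup(dev_id, queue_id, &queue_conf);
 *
 *	rte_event_port_default_conf_get(dev_id, port_id, &port_conf);
 *	rte_event_port_setup(dev_id, port_id, &port_conf);
 *
 *	rte_event_port_link(dev_id, port_id, &queue_id, NULL, 1);
 *	rte_event_dev_start(dev_id);
 * \endcode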
147  *
148  * If the application wants to change the configuration (i.e. call
149  * rte_event_dev_configure(), rte_event_queue_setup(), or
150  * rte_event_port_setup()), it must call rte_event_dev_stop() first to stop the
151  * device and then do the reconfiguration before calling rte_event_dev_start()
152  * again. The schedule, enqueue and dequeue functions should not be invoked
153  * when the device is stopped.
154  *
155  * Finally, an application can close an Event device by invoking the
156  * rte_event_dev_close() function. Once closed, a device cannot be
157  * reconfigured or restarted.
158  *
159  * Driver-Oriented Event API
160  * -------------------------
161  *
162  * At the Event driver level, Event devices are represented by a generic
163  * data structure of type *rte_event_dev*.
164  *
165  * Event devices are dynamically registered during the PCI/SoC device probing
166  * phase performed at EAL initialization time.
167  * When an Event device is being probed, an *rte_event_dev* structure is allocated
168  * for it and the event_dev_init() function supplied by the Event driver
169  * is invoked to properly initialize the device.
170  *
171  * The role of the device init function is to reset the device hardware or
172  * to initialize the software event driver implementation.
173  *
174  * If the device init operation is successful, the device is assigned a device
175  * id (dev_id) for application use.
176  * Otherwise, the *rte_event_dev* structure is freed.
177  *
178  * Each function of the application Event API invokes a specific function
179  * of the PMD that controls the target device designated by its device
180  * identifier.
181  *
182  * For this purpose, all device-specific functions of an Event driver are
183  * supplied through a set of pointers contained in a generic structure of type
184  * *event_dev_ops*.
185  * The address of the *event_dev_ops* structure is stored in the *rte_event_dev*
186  * structure by the device init function of the Event driver, which is
187  * invoked during the PCI/SoC device probing phase, as explained earlier.
188  *
189  * In other words, each function of the Event API simply retrieves the
190  * *rte_event_dev* structure associated with the device identifier and
191  * performs an indirect invocation of the corresponding driver function
192  * supplied in the *event_dev_ops* structure of the *rte_event_dev* structure.
193  *
194  * For performance reasons, the addresses of the fast-path functions of the
195  * event driver are not contained in the *event_dev_ops* structure.
196  * Instead, they are directly stored at the beginning of the *rte_event_dev*
197  * structure to avoid an extra indirect memory access during their invocation.
198  *
199  * Event Enqueue, Dequeue and Scheduling
200  * -------------------------------------
201  *
 * RTE event device drivers do not use interrupts for enqueue or dequeue
 * operations. Instead, Event drivers export Poll-Mode enqueue and dequeue
 * functions to applications.
 *
 * Events are injected into the event device through the *enqueue* operation by
 * event producers in the system. Typical event producers are the ethdev
 * subsystem (generating packet events), the CPU (SW) (generating events based on
 * different stages of application processing), the cryptodev subsystem
 * (generating crypto work completion notifications), etc.
211  *
212  * The *dequeue* operation gets one or more events from the event ports.
213  * The application processes the events and sends them to a downstream event queue through
214  * rte_event_enqueue_burst(), if it is an intermediate stage of event processing.
 * At the final stage of processing, the application may use the Tx adapter API for maintaining
216  * the event ingress order while sending the packet/event on the wire via NIC Tx.
217  *
218  * The point at which events are scheduled to ports depends on the device.
219  * For hardware devices, scheduling occurs asynchronously without any software
220  * intervention. Software schedulers can either be distributed
221  * (each worker thread schedules events to its own port) or centralized
222  * (a dedicated thread schedules to all ports). Distributed software schedulers
223  * perform the scheduling inside the enqueue or dequeue functions, whereas centralized
224  * software schedulers need a dedicated service core for scheduling.
225  * The absence of the RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag
226  * indicates that the device is centralized and thus needs a dedicated scheduling
227  * thread (generally an RTE service that should be mapped to one or more service cores)
228  * that repeatedly calls the software specific scheduling function.
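 *
 * As an illustrative sketch (assuming a centralized software scheduler, a free
 * lcore id of 1 for the service core, and using the rte_service.h API), the
 * device's scheduling service can be mapped to a service core as follows:
 *
 * \code{.c}
 *	uint32_t service_id;
 *	const uint32_t service_lcore = 1; // assumed to be an available lcore
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		rte_service_lcore_add(service_lcore);
 *		rte_service_map_lcore_set(service_id, service_lcore, 1);
 *		rte_service_runstate_set(service_id, 1);
 *		rte_service_lcore_start(service_lcore);
 *	}
 * \endcode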
229  *
 * An event-driven worker thread has the following typical workflow on the fast path:
 * \code{.c}
 *	struct rte_event ev[BURST_SIZE];
 *
 *	while (1) {
 *		uint16_t nb = rte_event_dequeue_burst(dev_id, port_id, ev, BURST_SIZE, 0);
 *		// ... process the nb dequeued events ...
 *		rte_event_enqueue_burst(dev_id, port_id, ev, nb);
 *	}
 * \endcode
238  */
239 
240 #ifdef __cplusplus
241 extern "C" {
242 #endif
243 
244 #include <rte_compat.h>
245 #include <rte_common.h>
246 #include <rte_errno.h>
247 #include <rte_mbuf_pool_ops.h>
248 #include <rte_mempool.h>
249 
250 #include "rte_eventdev_trace_fp.h"
251 
252 struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
253 struct rte_event;
254 
255 /* Event device capability bitmap flags */
256 #define RTE_EVENT_DEV_CAP_QUEUE_QOS           (1ULL << 0)
257 /**< Event scheduling prioritization is based on the priority and weight
258  * associated with each event queue.
259  *
 * Events from the queue with the highest priority
 * are scheduled first. If the queues are of the same priority, the weights of the
 * queues are considered to select a queue in a weighted round-robin fashion.
 * Subsequent dequeue calls from an event port could see events from the same
 * event queue, if the queue is configured with an affinity count. The affinity
 * count is the number of subsequent dequeue calls in which an event port
 * should use the same event queue, provided the queue is non-empty.
267  *
268  * NOTE: A device may use both queue prioritization and event prioritization
269  * (@ref RTE_EVENT_DEV_CAP_EVENT_QOS capability) when making packet scheduling decisions.
270  *
271  *  @see rte_event_queue_setup()
272  *  @see rte_event_queue_attr_set()
273  */
274 #define RTE_EVENT_DEV_CAP_EVENT_QOS           (1ULL << 1)
275 /**< Event scheduling prioritization is based on the priority associated with
276  *  each event.
277  *
278  *  Priority of each event is supplied in *rte_event* structure
279  *  on each enqueue operation.
280  *  If this capability is not set, the priority field of the event structure
281  *  is ignored for each event.
282  *
283  * NOTE: A device may use both queue prioritization (@ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability)
284  * and event prioritization when making packet scheduling decisions.
 *
286  *  @see rte_event_enqueue_burst()
287  */
288 #define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED   (1ULL << 2)
289 /**< Event device operates in distributed scheduling mode.
290  *
 * In distributed scheduling mode, event scheduling happens in HW,
 * within rte_event_dequeue_burst() / rte_event_enqueue_burst(), or in a combination of the two.
 * If the flag is not set, then the eventdev is centralized and thus needs a
 * dedicated service core that acts as a scheduling thread.
295  *
296  * @see rte_event_dev_service_id_get()
297  */
298 #define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES     (1ULL << 3)
299 /**< Event device is capable of accepting enqueued events, of any type
300  * advertised as supported by the device, to all destination queues.
301  *
302  * When this capability is set, and @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag is set
303  * in @ref rte_event_queue_conf.event_queue_cfg, the "schedule_type" field of the
304  * @ref rte_event_queue_conf structure is ignored when a queue is being configured.
305  * Instead the "sched_type" field of each event enqueued is used to
306  * select the scheduling to be performed on that event.
307  *
308  * If this capability is not set, or the configuration flag is not set,
309  * the queue only supports events of the *RTE_SCHED_TYPE_* type specified
 * in the @ref rte_event_queue_conf structure at the time of configuration.
311  * The behaviour when events of other scheduling types are sent to the queue is
312  * undefined.
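 *
 * As an illustrative sketch (assuming this capability is present, and that dev_id,
 * queue_id and port_id are valid), a queue may be set up to accept all scheduling
 * types, with each enqueued event then carrying its own sched_type:
 *
 * \code{.c}
 *	struct rte_event_queue_conf conf;
 *	struct rte_event ev = {0};
 *
 *	rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
 *	conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
 *	rte_event_queue_setup(dev_id, queue_id, &conf);
 *
 *	ev.queue_id = queue_id;
 *	ev.sched_type = RTE_SCHED_TYPE_PARALLEL; // selected per event
 *	rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 * \endcode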
313  *
314  * @see RTE_EVENT_QUEUE_CFG_ALL_TYPES
315  * @see RTE_SCHED_TYPE_ATOMIC
316  * @see RTE_SCHED_TYPE_ORDERED
317  * @see RTE_SCHED_TYPE_PARALLEL
318  * @see rte_event_queue_conf.event_queue_cfg
319  * @see rte_event_queue_conf.schedule_type
320  * @see rte_event_enqueue_burst()
321  */
322 #define RTE_EVENT_DEV_CAP_BURST_MODE          (1ULL << 4)
/**< Event device is capable of operating in burst mode for enqueue (forward,
 * release) and dequeue operations.
 *
 * If this capability is not set, the application
 * can still use rte_event_dequeue_burst() and rte_event_enqueue_burst(), but the
 * PMD accepts or returns only one event at a time.
329  *
330  * @see rte_event_dequeue_burst()
331  * @see rte_event_enqueue_burst()
332  */
333 #define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE    (1ULL << 5)
334 /**< Event device ports support disabling the implicit release feature, in
335  * which the port will release all unreleased events in its dequeue operation.
336  *
337  * If this capability is set and the port is configured with implicit release
338  * disabled, the application is responsible for explicitly releasing events
339  * using either the @ref RTE_EVENT_OP_FORWARD or the @ref RTE_EVENT_OP_RELEASE event
340  * enqueue operations.
341  *
342  * @see rte_event_dequeue_burst()
343  * @see rte_event_enqueue_burst()
344  */
345 
346 #define RTE_EVENT_DEV_CAP_NONSEQ_MODE         (1ULL << 6)
347 /**< Event device is capable of operating in non-sequential mode.
348  *
 * The path of the event need not be sequential. The application can change
 * the path of an event at runtime, and events may be sent to queues in any order.
 *
 * If the flag is not set, then each event will follow a path from queue 0
 * to queue 1 to queue 2, etc.
354  * The eventdev will return an error when the application enqueues an event for a
355  * qid which is not the next in the sequence.
356  */
357 
358 #define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK   (1ULL << 7)
359 /**< Event device is capable of reconfiguring the queue/port link at runtime.
360  *
 * If the flag is not set, the eventdev queue/port link can only be
 * configured during initialization, or by stopping the device and
 * then restarting it after reconfiguration.
364  *
365  * @see rte_event_port_link()
366  * @see rte_event_port_unlink()
367  */
368 
369 #define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
370 /**< Event device is capable of setting up links between multiple queues and a single port.
371  *
372  * If the flag is not set, each port may only be linked to a single queue, and
373  * so can only receive events from that queue.
374  * However, each queue may be linked to multiple ports.
375  *
376  * @see rte_event_port_link()
377  */
378 
379 #define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
380 /**< Event device preserves the flow ID from the enqueued event to the dequeued event.
381  *
382  * If this flag is not set,
383  * the content of the flow-id field in dequeued events is implementation dependent.
384  *
385  * @see rte_event_dequeue_burst()
386  */
387 
388 #define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)
389 /**< Event device *does not* require calls to rte_event_maintain().
390  *
391  * An event device that does not set this flag requires calls to
392  * rte_event_maintain() during periods when neither
393  * rte_event_dequeue_burst() nor rte_event_enqueue_burst() are called
394  * on a port. This will allow the event device to perform internal
 * processing, such as flushing buffered events, returning credits to a
 * global pool, or processing signaling related to load balancing.
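 *
 * As an illustrative sketch (assuming a device without this capability, and an
 * application-defined `idle` condition), a worker that is temporarily neither
 * enqueuing nor dequeuing could do:
 * \code{.c}
 *	while (idle) {
 *		rte_event_maintain(dev_id, port_id, 0);
 *		// ... e.g. poll control messages, back off, etc. ...
 *	}
 * \endcode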
397  *
398  * @see rte_event_maintain()
399  */
400 
401 #define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11)
/**< Event device is capable of changing the queue attributes at runtime, i.e.
 * after the rte_event_queue_setup() or rte_event_dev_start() call sequence.
404  *
405  * If this flag is not set, event queue attributes can only be configured during
406  * rte_event_queue_setup().
407  *
408  * @see rte_event_queue_setup()
409  */
410 
411 #define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12)
412 /**< Event device is capable of supporting multiple link profiles per event port.
413  *
414  * When set, the value of `rte_event_dev_info::max_profiles_per_port` is greater
415  * than one, and multiple profiles may be configured and then switched at runtime.
416  * If not set, only a single profile may be configured, which may itself be
417  * runtime adjustable (if @ref RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK is set).
418  *
419  * @see rte_event_port_profile_links_set()
420  * @see rte_event_port_profile_links_get()
421  * @see rte_event_port_profile_switch()
422  * @see RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK
423  */
424 
425 #define RTE_EVENT_DEV_CAP_ATOMIC  (1ULL << 13)
426 /**< Event device is capable of atomic scheduling.
427  * When this flag is set, the application can configure queues with scheduling type
428  * atomic on this event device.
429  *
430  * @see RTE_SCHED_TYPE_ATOMIC
431  */
432 
433 #define RTE_EVENT_DEV_CAP_ORDERED  (1ULL << 14)
434 /**< Event device is capable of ordered scheduling.
435  * When this flag is set, the application can configure queues with scheduling type
436  * ordered on this event device.
437  *
438  * @see RTE_SCHED_TYPE_ORDERED
439  */
440 
441 #define RTE_EVENT_DEV_CAP_PARALLEL  (1ULL << 15)
442 /**< Event device is capable of parallel scheduling.
443  * When this flag is set, the application can configure queues with scheduling type
444  * parallel on this event device.
445  *
446  * @see RTE_SCHED_TYPE_PARALLEL
447  */
448 
449 /* Event device priority levels */
450 #define RTE_EVENT_DEV_PRIORITY_HIGHEST   0
451 /**< Highest priority level for events and queues.
452  *
453  * @see rte_event_queue_setup()
454  * @see rte_event_enqueue_burst()
455  * @see rte_event_port_link()
456  */
457 #define RTE_EVENT_DEV_PRIORITY_NORMAL    128
458 /**< Normal priority level for events and queues.
459  *
460  * @see rte_event_queue_setup()
461  * @see rte_event_enqueue_burst()
462  * @see rte_event_port_link()
463  */
464 #define RTE_EVENT_DEV_PRIORITY_LOWEST    255
465 /**< Lowest priority level for events and queues.
466  *
467  * @see rte_event_queue_setup()
468  * @see rte_event_enqueue_burst()
469  * @see rte_event_port_link()
470  */
471 
472 /* Event queue scheduling weights */
473 #define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255
474 /**< Highest weight of an event queue.
475  *
476  * @see rte_event_queue_attr_get()
477  * @see rte_event_queue_attr_set()
478  */
479 #define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0
480 /**< Lowest weight of an event queue.
481  *
482  * @see rte_event_queue_attr_get()
483  * @see rte_event_queue_attr_set()
484  */
485 
486 /* Event queue scheduling affinity */
487 #define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255
488 /**< Highest scheduling affinity of an event queue.
489  *
490  * @see rte_event_queue_attr_get()
491  * @see rte_event_queue_attr_set()
492  */
493 #define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0
494 /**< Lowest scheduling affinity of an event queue.
495  *
496  * @see rte_event_queue_attr_get()
497  * @see rte_event_queue_attr_set()
498  */
499 
500 /**
501  * Get the total number of event devices that have been successfully
502  * initialised.
503  *
504  * @return
505  *   The total number of usable event devices.
506  */
507 uint8_t
508 rte_event_dev_count(void);
509 
510 /**
511  * Get the device identifier for the named event device.
512  *
513  * @param name
514  *   Event device name to select the event device identifier.
515  *
516  * @return
517  *   Returns event device identifier on success.
518  *   - <0: Failure to find named event device.
519  */
520 int
521 rte_event_dev_get_dev_id(const char *name);
522 
523 /**
524  * Return the NUMA socket to which a device is connected.
525  *
526  * @param dev_id
527  *   The identifier of the device.
528  * @return
529  *   The NUMA socket id to which the device is connected or
530  *   a default of zero if the socket could not be determined.
 *   - -EINVAL: dev_id value is out of range.
532  */
533 int
534 rte_event_dev_socket_id(uint8_t dev_id);
535 
536 /**
537  * Event device information
538  */
539 struct rte_event_dev_info {
540 	const char *driver_name;	/**< Event driver name. */
541 	struct rte_device *dev;	/**< Device information. */
542 	uint32_t min_dequeue_timeout_ns;
543 	/**< Minimum global dequeue timeout(ns) supported by this device. */
544 	uint32_t max_dequeue_timeout_ns;
545 	/**< Maximum global dequeue timeout(ns) supported by this device. */
546 	uint32_t dequeue_timeout_ns;
547 	/**< Configured global dequeue timeout(ns) for this device. */
548 	uint8_t max_event_queues;
549 	/**< Maximum event queues supported by this device.
550 	 *
551 	 * This count excludes any queues covered by @ref max_single_link_event_port_queue_pairs.
552 	 */
553 	uint32_t max_event_queue_flows;
554 	/**< Maximum number of flows within an event queue supported by this device. */
555 	uint8_t max_event_queue_priority_levels;
556 	/**< Maximum number of event queue priority levels supported by this device.
557 	 *
558 	 * Valid when the device has @ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
559 	 *
560 	 * The implementation shall normalize priority values specified between
561 	 * @ref RTE_EVENT_DEV_PRIORITY_HIGHEST and @ref RTE_EVENT_DEV_PRIORITY_LOWEST
562 	 * to map them internally to this range of priorities.
563 	 * [For devices supporting a power-of-2 number of priority levels, this
564 	 * normalization will be done via a right-shift operation, so only the top
565 	 * log2(max_levels) bits will be used by the event device.]
566 	 *
567 	 * @see rte_event_queue_conf.priority
568 	 */
569 	uint8_t max_event_priority_levels;
	/**< Maximum number of event priority levels supported by this device.
571 	 *
572 	 * Valid when the device has @ref RTE_EVENT_DEV_CAP_EVENT_QOS capability.
573 	 *
574 	 * The implementation shall normalize priority values specified between
575 	 * @ref RTE_EVENT_DEV_PRIORITY_HIGHEST and @ref RTE_EVENT_DEV_PRIORITY_LOWEST
576 	 * to map them internally to this range of priorities.
577 	 * [For devices supporting a power-of-2 number of priority levels, this
578 	 * normalization will be done via a right-shift operation, so only the top
579 	 * log2(max_levels) bits will be used by the event device.]
580 	 *
581 	 * @see rte_event.priority
582 	 */
583 	uint8_t max_event_ports;
584 	/**< Maximum number of event ports supported by this device.
585 	 *
586 	 * This count excludes any ports covered by @ref max_single_link_event_port_queue_pairs.
587 	 */
588 	uint8_t max_event_port_dequeue_depth;
589 	/**< Maximum number of events that can be dequeued at a time from an event port
590 	 * on this device.
591 	 *
592 	 * A device that does not support burst dequeue
593 	 * (@ref RTE_EVENT_DEV_CAP_BURST_MODE) will set this to 1.
594 	 */
595 	uint32_t max_event_port_enqueue_depth;
596 	/**< Maximum number of events that can be enqueued at a time to an event port
597 	 * on this device.
598 	 *
599 	 * A device that does not support burst enqueue
600 	 * (@ref RTE_EVENT_DEV_CAP_BURST_MODE) will set this to 1.
601 	 */
602 	uint8_t max_event_port_links;
603 	/**< Maximum number of queues that can be linked to a single event port on this device.
604 	 */
605 	int32_t max_num_events;
606 	/**< A *closed system* event dev has a limit on the number of events it
607 	 * can manage at a time.
608 	 * Once the number of events tracked by an eventdev exceeds this number,
609 	 * any enqueues of NEW events will fail.
610 	 * An *open system* event dev does not have a limit and will specify this as -1.
611 	 */
612 	uint32_t event_dev_cap;
613 	/**< Event device capabilities flags (RTE_EVENT_DEV_CAP_*). */
614 	uint8_t max_single_link_event_port_queue_pairs;
615 	/**< Maximum number of event ports and queues, supported by this device,
616 	 * that are optimized for (and only capable of) single-link configurations.
617 	 * These ports and queues are not accounted for in @ref max_event_ports
618 	 * or @ref max_event_queues.
619 	 */
620 	uint8_t max_profiles_per_port;
621 	/**< Maximum number of event queue link profiles per event port.
622 	 * A device that doesn't support multiple profiles will set this as 1.
623 	 */
624 };
625 
626 /**
627  * Retrieve the contextual information of an event device.
628  *
629  * @param dev_id
630  *   The identifier of the device.
631  *
632  * @param[out] dev_info
633  *   A pointer to a structure of type *rte_event_dev_info* to be filled with the
634  *   contextual information of the device.
635  *
636  * @return
637  *   - 0: Success, driver updates the contextual information of the event device
638  *   - <0: Error code returned by the driver info get function.
639  */
640 int
641 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
642 
643 /**
644  * The count of ports.
645  */
646 #define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
647 /**
648  * The count of queues.
649  */
650 #define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
651 /**
652  * The status of the device, zero for stopped, non-zero for started.
653  */
654 #define RTE_EVENT_DEV_ATTR_STARTED 2
655 
656 /**
657  * Get an attribute from a device.
658  *
659  * @param dev_id Eventdev id
660  * @param attr_id The attribute ID to retrieve
661  * @param[out] attr_value A pointer that will be filled in with the attribute
662  *             value if successful.
663  *
664  * @return
665  *   - 0: Successfully retrieved attribute value
666  *   - -EINVAL: Invalid device or  *attr_id* provided, or *attr_value* is NULL
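 *
 * For example, a short sketch retrieving the number of ports configured on a
 * device (dev_id assumed valid):
 * \code{.c}
 *	uint32_t nb_ports;
 *
 *	rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_PORT_COUNT, &nb_ports);
 * \endcode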
667  */
668 int
669 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
670 		       uint32_t *attr_value);
671 
672 
673 /* Event device configuration bitmap flags */
674 #define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
675 /**< Override the global *dequeue_timeout_ns* and use per dequeue timeout in ns.
676  *  @see rte_event_dequeue_timeout_ticks(), rte_event_dequeue_burst()
677  */
678 
679 /** Event device configuration structure */
680 struct rte_event_dev_config {
681 	uint32_t dequeue_timeout_ns;
682 	/**< rte_event_dequeue_burst() timeout on this device.
683 	 * This value should be in the range of *min_dequeue_timeout_ns* and
	 * *max_dequeue_timeout_ns*, which were previously provided in
	 * rte_event_dev_info_get().
	 * The value 0 is allowed, in which case the default dequeue timeout is used.
687 	 * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
688 	 */
689 	int32_t nb_events_limit;
690 	/**< In a *closed system* this field is the limit on maximum number of
691 	 * events that can be inflight in the eventdev at a given time. The
692 	 * limit is required to ensure that the finite space in a closed system
693 	 * is not overwhelmed. The value cannot exceed the *max_num_events*
694 	 * as provided by rte_event_dev_info_get().
695 	 * This value should be set to -1 for *open system*.
696 	 */
697 	uint8_t nb_event_queues;
698 	/**< Number of event queues to configure on this device.
	 * This value cannot exceed the *max_event_queues* which was previously
	 * provided in rte_event_dev_info_get().
701 	 */
702 	uint8_t nb_event_ports;
703 	/**< Number of event ports to configure on this device.
	 * This value cannot exceed the *max_event_ports* which was previously
	 * provided in rte_event_dev_info_get().
706 	 */
707 	uint32_t nb_event_queue_flows;
708 	/**< Number of flows for any event queue on this device.
	 * This value cannot exceed the *max_event_queue_flows* which was
	 * previously provided in rte_event_dev_info_get().
711 	 */
712 	uint32_t nb_event_port_dequeue_depth;
	/**< Maximum number of events that can be dequeued at a time from an
	 * event port on this device.
	 * This value cannot exceed the *max_event_port_dequeue_depth*
	 * which was previously provided in rte_event_dev_info_get().
717 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
718 	 * @see rte_event_port_setup()
719 	 */
720 	uint32_t nb_event_port_enqueue_depth;
	/**< Maximum number of events that can be enqueued at a time to an
	 * event port on this device.
	 * This value cannot exceed the *max_event_port_enqueue_depth*
	 * which was previously provided in rte_event_dev_info_get().
725 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
726 	 * @see rte_event_port_setup()
727 	 */
728 	uint32_t event_dev_cfg;
729 	/**< Event device config flags(RTE_EVENT_DEV_CFG_)*/
730 	uint8_t nb_single_link_event_port_queues;
731 	/**< Number of event ports and queues that will be singly-linked to
732 	 * each other. These are a subset of the overall event ports and
733 	 * queues; this value cannot exceed *nb_event_ports* or
734 	 * *nb_event_queues*. If the device has ports and queues that are
735 	 * optimized for single-link usage, this field is a hint for how many
736 	 * to allocate; otherwise, regular event ports and queues can be used.
737 	 */
738 };
739 
740 /**
741  * Configure an event device.
742  *
 * This function must be invoked before any other function in the
744  * API. This function can also be re-invoked when a device is in the
745  * stopped state.
746  *
 * The caller may use rte_event_dev_info_get() to get the capabilities of the
 * resources available for this event device.
749  *
750  * @param dev_id
751  *   The identifier of the device to configure.
752  * @param dev_conf
753  *   The event device configuration structure.
754  *
755  * @return
756  *   - 0: Success, device configured.
757  *   - <0: Error code returned by the driver configuration function.
758  */
759 int
760 rte_event_dev_configure(uint8_t dev_id,
761 			const struct rte_event_dev_config *dev_conf);
762 
763 /* Event queue specific APIs */
764 
765 /* Event queue configuration bitmap flags */
766 #define RTE_EVENT_QUEUE_CFG_ALL_TYPES          (1ULL << 0)
/**< Allow ATOMIC, ORDERED, and PARALLEL schedule type enqueue.
768  *
769  * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
770  * @see rte_event_enqueue_burst()
771  */
772 #define RTE_EVENT_QUEUE_CFG_SINGLE_LINK        (1ULL << 1)
773 /**< This event queue links only to a single event port.
774  *
775  *  @see rte_event_port_setup(), rte_event_port_link()
776  */
777 
778 /** Event queue configuration structure */
779 struct rte_event_queue_conf {
780 	uint32_t nb_atomic_flows;
781 	/**< The maximum number of active flows this queue can track at any
782 	 * given time. If the queue is configured for atomic scheduling (by
783 	 * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg
784 	 * or RTE_SCHED_TYPE_ATOMIC flag to schedule_type), then the
785 	 * value must be in the range of [1, nb_event_queue_flows], which was
786 	 * previously provided in rte_event_dev_configure().
787 	 */
788 	uint32_t nb_atomic_order_sequences;
789 	/**< The maximum number of outstanding events waiting to be
790 	 * reordered by this queue. In other words, the number of entries in
	 * this queue's reorder buffer. When the number of events in the
	 * reorder buffer reaches *nb_atomic_order_sequences*, the
	 * scheduler cannot schedule further events from this queue, and an invalid
	 * event will be returned from dequeue until one or more entries are
	 * freed up/released.
796 	 * If the queue is configured for ordered scheduling (by applying the
797 	 * RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg or
798 	 * RTE_SCHED_TYPE_ORDERED flag to schedule_type), then the value must
799 	 * be in the range of [1, nb_event_queue_flows], which was
800 	 * previously supplied to rte_event_dev_configure().
801 	 */
802 	uint32_t event_queue_cfg;
803 	/**< Queue cfg flags(EVENT_QUEUE_CFG_) */
804 	uint8_t schedule_type;
805 	/**< Queue schedule type(RTE_SCHED_TYPE_*).
806 	 * Valid when RTE_EVENT_QUEUE_CFG_ALL_TYPES bit is not set in
807 	 * event_queue_cfg.
808 	 */
809 	uint8_t priority;
810 	/**< Priority for this event queue relative to other event queues.
	 * The requested priority should be in the range of
812 	 * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
813 	 * The implementation shall normalize the requested priority to
814 	 * event device supported priority value.
815 	 * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability
816 	 */
817 	uint8_t weight;
818 	/**< Weight of the event queue relative to other event queues.
819 	 * The requested weight should be in the range of
	 * [RTE_EVENT_QUEUE_WEIGHT_HIGHEST, RTE_EVENT_QUEUE_WEIGHT_LOWEST].
821 	 * The implementation shall normalize the requested weight to event
822 	 * device supported weight value.
823 	 * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
824 	 */
825 	uint8_t affinity;
826 	/**< Affinity of the event queue relative to other event queues.
827 	 * The requested affinity should be in the range of
	 * [RTE_EVENT_QUEUE_AFFINITY_HIGHEST, RTE_EVENT_QUEUE_AFFINITY_LOWEST].
829 	 * The implementation shall normalize the requested affinity to event
830 	 * device supported affinity value.
831 	 * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
832 	 */
833 };
834 
835 /**
836  * Retrieve the default configuration information of an event queue designated
837  * by its *queue_id* from the event driver for an event device.
838  *
 * This function is intended to be used in conjunction with rte_event_queue_setup(),
 * where the caller needs to set up the queue by overriding a few default values.
841  *
842  * @param dev_id
843  *   The identifier of the device.
844  * @param queue_id
845  *   The index of the event queue to get the configuration information.
846  *   The value must be in the range [0, nb_event_queues - 1]
847  *   previously supplied to rte_event_dev_configure().
848  * @param[out] queue_conf
849  *   The pointer to the default event queue configuration data.
850  * @return
851  *   - 0: Success, driver updates the default event queue configuration data.
852  *   - <0: Error code returned by the driver info get function.
853  *
854  * @see rte_event_queue_setup()
855  */
856 int
857 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
858 				 struct rte_event_queue_conf *queue_conf);
859 
860 /**
861  * Allocate and set up an event queue for an event device.
862  *
863  * @param dev_id
864  *   The identifier of the device.
865  * @param queue_id
866  *   The index of the event queue to setup. The value must be in the range
867  *   [0, nb_event_queues - 1] previously supplied to rte_event_dev_configure().
868  * @param queue_conf
869  *   The pointer to the configuration data to be used for the event queue.
 *   A NULL value is allowed, in which case the default configuration is used.
871  *
872  * @see rte_event_queue_default_conf_get()
873  *
874  * @return
875  *   - 0: Success, event queue correctly set up.
876  *   - <0: event queue configuration failed
877  */
878 int
879 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
880 		      const struct rte_event_queue_conf *queue_conf);
881 
882 /**
883  * The priority of the queue.
884  */
885 #define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
886 /**
887  * The number of atomic flows configured for the queue.
888  */
889 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
890 /**
891  * The number of atomic order sequences configured for the queue.
892  */
893 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
894 /**
895  * The cfg flags for the queue.
896  */
897 #define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
898 /**
899  * The schedule type of the queue.
900  */
901 #define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
902 /**
903  * The weight of the queue.
904  */
905 #define RTE_EVENT_QUEUE_ATTR_WEIGHT 5
906 /**
907  * Affinity of the queue.
908  */
909 #define RTE_EVENT_QUEUE_ATTR_AFFINITY 6
910 
911 /**
912  * Get an attribute from a queue.
913  *
914  * @param dev_id
915  *   Eventdev id
916  * @param queue_id
917  *   Eventdev queue id
918  * @param attr_id
919  *   The attribute ID to retrieve
920  * @param[out] attr_value
921  *   A pointer that will be filled in with the attribute value if successful
922  *
923  * @return
924  *   - 0: Successfully returned value
925  *   - -EINVAL: invalid device, queue or attr_id provided, or attr_value was
926  *		NULL
927  *   - -EOVERFLOW: returned when attr_id is set to
928  *   RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and event_queue_cfg is set to
929  *   RTE_EVENT_QUEUE_CFG_ALL_TYPES
930  */
931 int
932 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
933 			uint32_t *attr_value);
934 
935 /**
936  * Set an event queue attribute.
937  *
938  * @param dev_id
939  *   Eventdev id
940  * @param queue_id
941  *   Eventdev queue id
942  * @param attr_id
943  *   The attribute ID to set
944  * @param attr_value
945  *   The attribute value to set
946  *
947  * @return
948  *   - 0: Successfully set attribute.
949  *   - -EINVAL: invalid device, queue or attr_id.
950  *   - -ENOTSUP: device does not support setting the event attribute.
951  *   - <0: failed to set event queue attribute
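 *
 * As an illustrative sketch (assuming the device reports the
 * @ref RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR capability and dev_id/queue_id are
 * valid), raising a queue's weight at runtime:
 * \code{.c}
 *	rte_event_queue_attr_set(dev_id, queue_id, RTE_EVENT_QUEUE_ATTR_WEIGHT,
 *				 RTE_EVENT_QUEUE_WEIGHT_HIGHEST);
 * \endcode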
952  */
953 int
954 rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
955 			 uint64_t attr_value);
956 
957 /* Event port specific APIs */
958 
959 /* Event port configuration bitmap flags */
960 #define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)
961 /**< Configure the port not to release outstanding events in
 * rte_event_dequeue_burst(). If set, all events received through
963  * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
964  * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
965  * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
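 *
 * A sketch of usage (assuming the device advertises the capability and that
 * dev_id/port_id are valid; `ev` is an event previously dequeued on this port):
 * \code{.c}
 *	struct rte_event_port_conf pconf;
 *
 *	rte_event_port_default_conf_get(dev_id, port_id, &pconf);
 *	pconf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
 *	rte_event_port_setup(dev_id, port_id, &pconf);
 *
 *	// later, on the fast path, drop the event but release its scheduling context:
 *	ev.op = RTE_EVENT_OP_RELEASE;
 *	rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 * \endcode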
966  */
967 #define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)
968 /**< This event port links only to a single event queue.
969  *
970  *  @see rte_event_port_setup(), rte_event_port_link()
971  */
972 #define RTE_EVENT_PORT_CFG_HINT_PRODUCER       (1ULL << 2)
973 /**< Hint that this event port will primarily enqueue events to the system.
974  * A PMD can optimize its internal workings by assuming that this port is
975  * primarily going to enqueue NEW events.
976  *
977  * Note that this flag is only a hint, so PMDs must operate under the
978  * assumption that any port can enqueue an event with any type of op.
979  *
980  *  @see rte_event_port_setup()
981  */
982 #define RTE_EVENT_PORT_CFG_HINT_CONSUMER       (1ULL << 3)
983 /**< Hint that this event port will primarily dequeue events from the system.
984  * A PMD can optimize its internal workings by assuming that this port is
985  * primarily going to consume events, and not enqueue FORWARD or RELEASE
986  * events.
987  *
988  * Note that this flag is only a hint, so PMDs must operate under the
989  * assumption that any port can enqueue an event with any type of op.
990  *
991  *  @see rte_event_port_setup()
992  */
993 #define RTE_EVENT_PORT_CFG_HINT_WORKER         (1ULL << 4)
994 /**< Hint that this event port will primarily pass existing events through.
995  * A PMD can optimize its internal workings by assuming that this port is
996  * primarily going to FORWARD events, and not enqueue NEW or RELEASE events
997  * often.
998  *
999  * Note that this flag is only a hint, so PMDs must operate under the
1000  * assumption that any port can enqueue an event with any type of op.
1001  *
1002  *  @see rte_event_port_setup()
1003  */
1004 
1005 /** Event port configuration structure */
1006 struct rte_event_port_conf {
1007 	int32_t new_event_threshold;
1008 	/**< A backpressure threshold for new event enqueues on this port.
1009 	 * Use for *closed system* event dev where event capacity is limited,
1010 	 * and cannot exceed the capacity of the event dev.
1011 	 * Configuring ports with different thresholds can make higher priority
1012 	 * traffic less likely to  be backpressured.
1013 	 * For example, a port used to inject NIC Rx packets into the event dev
1014 	 * can have a lower threshold so as not to overwhelm the device,
1015 	 * while ports used for worker pools can have a higher threshold.
1016 	 * This value cannot exceed the *nb_events_limit*
1017 	 * which was previously supplied to rte_event_dev_configure().
1018 	 * This should be set to '-1' for *open system*.
1019 	 */
1020 	uint16_t dequeue_depth;
1021 	/**< Configure number of bulk dequeues for this event port.
1022 	 * This value cannot exceed the *nb_event_port_dequeue_depth*
	 * which was previously supplied to rte_event_dev_configure().
1024 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
1025 	 */
1026 	uint16_t enqueue_depth;
1027 	/**< Configure number of bulk enqueues for this event port.
1028 	 * This value cannot exceed the *nb_event_port_enqueue_depth*
	 * which was previously supplied to rte_event_dev_configure().
1030 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
1031 	 */
1032 	uint32_t event_port_cfg; /**< Port cfg flags(EVENT_PORT_CFG_) */
1033 };
1034 
1035 /**
1036  * Retrieve the default configuration information of an event port designated
1037  * by its *port_id* from the event driver for an event device.
1038  *
 * This function is intended to be used in conjunction with rte_event_port_setup(),
 * where the caller needs to set up the port by overriding a few default values.
1041  *
1042  * @param dev_id
1043  *   The identifier of the device.
1044  * @param port_id
1045  *   The index of the event port to get the configuration information.
1046  *   The value must be in the range [0, nb_event_ports - 1]
1047  *   previously supplied to rte_event_dev_configure().
1048  * @param[out] port_conf
1049  *   The pointer to the default event port configuration data
1050  * @return
1051  *   - 0: Success, driver updates the default event port configuration data.
1052  *   - <0: Error code returned by the driver info get function.
1053  *
1054  * @see rte_event_port_setup()
1055  */
1056 int
1057 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
1058 				struct rte_event_port_conf *port_conf);
1059 
1060 /**
1061  * Allocate and set up an event port for an event device.
1062  *
1063  * @param dev_id
1064  *   The identifier of the device.
1065  * @param port_id
1066  *   The index of the event port to setup. The value must be in the range
1067  *   [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
1068  * @param port_conf
 *   The pointer to the configuration data to be used for the port.
 *   A NULL value is allowed, in which case the default configuration is used.
1071  *
1072  * @see rte_event_port_default_conf_get()
1073  *
1074  * @return
1075  *   - 0: Success, event port correctly set up.
1076  *   - <0: Port configuration failed
 *   - (-EDQUOT) Quota exceeded (the application tried to link a queue configured
 *   with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port).
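 *
 * A sketch of typical usage (dev_id and port_id assumed already chosen; the
 * halved threshold is only an example of overriding a default):
 * \code{.c}
 *	struct rte_event_port_conf conf;
 *
 *	rte_event_port_default_conf_get(dev_id, port_id, &conf);
 *	if (conf.new_event_threshold > 0)
 *		conf.new_event_threshold /= 2; // leave headroom for other producers
 *	rte_event_port_setup(dev_id, port_id, &conf);
 * \endcode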
1079  */
1080 int
1081 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
1082 		     const struct rte_event_port_conf *port_conf);
1083 
1084 typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
1085 					  struct rte_event event, void *arg);
1086 /**< Callback function prototype that can be passed during
 * rte_event_port_quiesce(), invoked once per released event.
1088  */
1089 
1090 /**
1091  * Quiesce any core specific resources consumed by the event port.
1092  *
 * Event ports are generally coupled with lcores, and a given hardware
 * implementation might require the PMD to store port-specific data in the
 * lcore.
 * When the application decides to migrate the event port to another lcore
 * or tear down the current lcore, it may call `rte_event_port_quiesce`
 * to make sure that all the data associated with the event port is released
 * from the lcore; this might also include any prefetched events.
 * While releasing the event port from the lcore, this function calls the
 * user-provided flush callback once per event.
1102  *
1103  * @note Invocation of this API does not affect the existing port configuration.
1104  *
1105  * @param dev_id
1106  *   The identifier of the device.
1107  * @param port_id
 *   The index of the event port to quiesce. The value must be in the range
1109  *   [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
1110  * @param release_cb
1111  *   Callback function invoked once per flushed event.
1112  * @param args
1113  *   Argument supplied to callback.
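 *
 * A sketch of typical usage (the callback body is an assumption; it assumes
 * <rte_mbuf.h> is included and frees any mbuf carried by a flushed packet event):
 * \code{.c}
 *	static void
 *	port_flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
 *	{
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(arg);
 *		if (ev.event_type == RTE_EVENT_TYPE_ETHDEV)
 *			rte_pktmbuf_free(ev.mbuf);
 *	}
 *
 *	// before migrating the port to another lcore or tearing this lcore down:
 *	rte_event_port_quiesce(dev_id, port_id, port_flush_cb, NULL);
 * \endcode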
1114  */
1115 void
1116 rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
1117 		       rte_eventdev_port_flush_t release_cb, void *args);
1118 
1119 /**
1120  * The queue depth of the port on the enqueue side
1121  */
1122 #define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
1123 /**
1124  * The queue depth of the port on the dequeue side
1125  */
1126 #define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
1127 /**
1128  * The new event threshold of the port
1129  */
1130 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
1131 /**
1132  * The implicit release disable attribute of the port
1133  */
1134 #define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
1135 
1136 /**
1137  * Get an attribute from a port.
1138  *
1139  * @param dev_id
1140  *   Eventdev id
1141  * @param port_id
1142  *   Eventdev port id
1143  * @param attr_id
1144  *   The attribute ID to retrieve
1145  * @param[out] attr_value
1146  *   A pointer that will be filled in with the attribute value if successful
1147  *
1148  * @return
1149  *   - 0: Successfully returned value
1150  *   - (-EINVAL) Invalid device, port or attr_id, or attr_value was NULL
1151  */
1152 int
1153 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
1154 			uint32_t *attr_value);
1155 
1156 /**
1157  * Start an event device.
1158  *
 * The device start step is the last one, and consists of setting the event
 * queues to start accepting events and scheduling them to event ports.
1161  *
1162  * On success, all basic functions exported by the API (event enqueue,
1163  * event dequeue and so on) can be invoked.
1164  *
1165  * @param dev_id
1166  *   Event device identifier
1167  * @return
1168  *   - 0: Success, device started.
1169  *   - -ESTALE : Not all ports of the device are configured
1170  *   - -ENOLINK: Not all queues are linked, which could lead to deadlock.
1171  */
1172 int
1173 rte_event_dev_start(uint8_t dev_id);
1174 
1175 /**
1176  * Stop an event device.
1177  *
1178  * This function causes all queued events to be drained, including those
1179  * residing in event ports. While draining events out of the device, this
1180  * function calls the user-provided flush callback (if one was registered) once
1181  * per event.
1182  *
1183  * The device can be restarted with a call to rte_event_dev_start(). Threads
1184  * that continue to enqueue/dequeue while the device is stopped, or being
1185  * stopped, will result in undefined behavior. This includes event adapters,
1186  * which must be stopped prior to stopping the eventdev.
1187  *
1188  * @param dev_id
1189  *   Event device identifier.
1190  *
1191  * @see rte_event_dev_stop_flush_callback_register()
1192  */
1193 void
1194 rte_event_dev_stop(uint8_t dev_id);
1195 
1196 typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
1197 					  struct rte_event event, void *arg);
1198 /**< Callback function called during rte_event_dev_stop(), invoked once per
1199  * flushed event.
1200  */
1201 
1202 /**
1203  * Registers a callback function to be invoked during rte_event_dev_stop() for
1204  * each flushed event. This function can be used to properly dispose of queued
1205  * events, for example events containing memory pointers.
1206  *
1207  * The callback function is only registered for the calling process. The
1208  * callback function must be registered in every process that can call
1209  * rte_event_dev_stop().
1210  *
1211  * To unregister a callback, call this function with a NULL callback pointer.
1212  *
1213  * @param dev_id
1214  *   The identifier of the device.
1215  * @param callback
1216  *   Callback function invoked once per flushed event.
1217  * @param userdata
1218  *   Argument supplied to callback.
1219  *
1220  * @return
1221  *  - 0 on success.
1222  *  - -EINVAL if *dev_id* is invalid
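 *
 * A sketch of typical usage (the callback body is an assumption; it assumes
 * <rte_mbuf.h> is included and frees any mbuf carried by a flushed packet event):
 * \code{.c}
 *	static void
 *	stop_flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
 *	{
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(arg);
 *		if (ev.event_type == RTE_EVENT_TYPE_ETHDEV)
 *			rte_pktmbuf_free(ev.mbuf);
 *	}
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, stop_flush_cb, NULL);
 * \endcode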
1223  *
1224  * @see rte_event_dev_stop()
1225  */
1226 int rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1227 					       rte_eventdev_stop_flush_t callback, void *userdata);
1228 
1229 /**
1230  * Close an event device. The device cannot be restarted!
1231  *
1232  * @param dev_id
1233  *   Event device identifier
1234  *
1235  * @return
1236  *  - 0 on successfully closing device
1237  *  - <0 on failure to close device
1238  *  - (-EAGAIN) if device is busy
1239  */
1240 int
1241 rte_event_dev_close(uint8_t dev_id);
1242 
1243 /**
1244  * Event vector structure.
1245  */
1246 struct rte_event_vector {
1247 	uint16_t nb_elem;
1248 	/**< Number of elements valid in this event vector. */
1249 	uint16_t elem_offset : 12;
1250 	/**< Offset into the vector array where valid elements start from. */
1251 	uint16_t rsvd : 3;
1252 	/**< Reserved for future use */
1253 	uint16_t attr_valid : 1;
1254 	/**< Indicates that the below union attributes have valid information.
1255 	 */
1256 	union {
1257 		/* Used by Rx/Tx adapter.
1258 		 * Indicates that all the elements in this vector belong to the
1259 		 * same port and queue pair when originating from Rx adapter,
1260 		 * valid only when event type is ETHDEV_VECTOR or
1261 		 * ETH_RX_ADAPTER_VECTOR.
		 * Can also be used to indicate to the Tx adapter the destination
		 * port and queue of the mbufs in the vector.
1264 		 */
1265 		struct {
1266 			uint16_t port;
1267 			/* Ethernet device port id. */
1268 			uint16_t queue;
1269 			/* Ethernet device queue id. */
1270 		};
1271 	};
1272 	/**< Union to hold common attributes of the vector array. */
1273 	uint64_t impl_opaque;
1274 
1275 /* empty structures do not have zero size in C++ leading to compilation errors
1276  * with clang about structure having different sizes in C and C++.
1277  * Since these are all zero-sized arrays, we can omit the "union" wrapper for
1278  * C++ builds, removing the warning.
1279  */
1280 #ifndef __cplusplus
1281 	/**< Implementation specific opaque value.
1282 	 * An implementation may use this field to hold implementation specific
1283 	 * value to share between dequeue and enqueue operation.
1284 	 * The application should not modify this field.
1285 	 */
1286 	union {
1287 #endif
1288 		struct rte_mbuf *mbufs[0];
1289 		void *ptrs[0];
1290 		uint64_t u64s[0];
1291 #ifndef __cplusplus
1292 	} __rte_aligned(16);
1293 #endif
1294 	/**< Start of the vector array union. Depending upon the event type the
1295 	 * vector array can be an array of mbufs or pointers or opaque u64
1296 	 * values.
1297 	 */
1298 } __rte_aligned(16);
1299 
1300 /* Scheduler type definitions */
1301 #define RTE_SCHED_TYPE_ORDERED          0
1302 /**< Ordered scheduling
1303  *
1304  * Events from an ordered flow of an event queue can be scheduled to multiple
1305  * ports for concurrent processing while maintaining the original event order.
1306  * This scheme enables the user to achieve high single flow throughput by
 * avoiding SW synchronization for ordering between ports which are bound to cores.
1308  *
1309  * The source flow ordering from an event queue is maintained when events are
1310  * enqueued to their destination queue within the same ordered flow context.
 * An event port holds the context until the application calls
 * rte_event_dequeue_burst() from the same port, which implicitly releases
 * the context.
 * The user may allow the scheduler to release the context earlier than that
 * by invoking rte_event_enqueue_burst() with the RTE_EVENT_OP_RELEASE operation.
1316  *
1317  * Events from the source queue appear in their original order when dequeued
1318  * from a destination queue.
1319  * Event ordering is based on the received event(s), but also other
1320  * (newly allocated or stored) events are ordered when enqueued within the same
1321  * ordered context. Events not enqueued (e.g. released or stored) within the
1322  * context are considered missing from reordering and are skipped at this time
1323  * (but can be ordered again within another context).
1324  *
1325  * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
1326  */
1327 
1328 #define RTE_SCHED_TYPE_ATOMIC           1
1329 /**< Atomic scheduling
1330  *
1331  * Events from an atomic flow of an event queue can be scheduled only to a
1332  * single port at a time. The port is guaranteed to have exclusive (atomic)
1333  * access to the associated flow context, which enables the user to avoid SW
1334  * synchronization. Atomic flows also help to maintain event ordering
1335  * since only one port at a time can process events from a flow of an
1336  * event queue.
1337  *
1338  * The atomic queue synchronization context is dedicated to the port until
1339  * the application calls rte_event_dequeue_burst() from the same port,
1340  * which implicitly releases the context. The user may allow the scheduler to
1341  * release the context earlier than that by invoking rte_event_enqueue_burst()
1342  * with RTE_EVENT_OP_RELEASE operation.
1343  *
1344  * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
1345  */
1346 
1347 #define RTE_SCHED_TYPE_PARALLEL         2
1348 /**< Parallel scheduling
1349  *
1350  * The scheduler performs priority scheduling, load balancing, etc. functions
1351  * but does not provide additional event synchronization or ordering.
1352  * It is free to schedule events from a single parallel flow of an event queue
1353  * to multiple event ports for concurrent processing.
1354  * The application is responsible for flow context synchronization and
1355  * event ordering (SW synchronization).
1356  *
1357  * @see rte_event_queue_setup(), rte_event_dequeue_burst()
1358  */
1359 
1360 /* Event types to classify the event source */
1361 #define RTE_EVENT_TYPE_ETHDEV           0x0
1362 /**< The event generated from ethdev subsystem */
1363 #define RTE_EVENT_TYPE_CRYPTODEV        0x1
1364 /**< The event generated from cryptodev subsystem */
1365 #define RTE_EVENT_TYPE_TIMER		0x2
1366 /**< The event generated from event timer adapter */
1367 #define RTE_EVENT_TYPE_CPU              0x3
1368 /**< The event generated from cpu for pipelining.
1369  * Application may use *sub_event_type* to further classify the event
1370  */
1371 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER   0x4
1372 /**< The event generated from event eth Rx adapter */
1373 #define RTE_EVENT_TYPE_DMADEV           0x5
1374 /**< The event generated from dma subsystem */
1375 #define RTE_EVENT_TYPE_VECTOR           0x8
1376 /**< Indicates that event is a vector.
1377  * All vector event types are a logical OR of RTE_EVENT_TYPE_VECTOR and a
1378  * base event type. This simplifies the pipeline design as one can split
1379  * processing of vector events and normal events across event types.
1380  * Example:
1381  *	if (ev.event_type & RTE_EVENT_TYPE_VECTOR) {
1382  *		// Classify and handle vector event.
1383  *	} else {
1384  *		// Classify and handle event.
1385  *	}
1386  */
1387 #define RTE_EVENT_TYPE_ETHDEV_VECTOR                                           \
1388 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
1389 /**< The event vector generated from ethdev subsystem */
1390 #define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
1391 /**< The event vector generated from cpu for pipelining. */
1392 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR                                   \
1393 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
1394 /**< The event vector generated from eth Rx adapter. */
1395 #define RTE_EVENT_TYPE_CRYPTODEV_VECTOR                                        \
1396 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
1397 /**< The event vector generated from cryptodev adapter. */
1398 
1399 #define RTE_EVENT_TYPE_MAX              0x10
1400 /**< Maximum number of event types */
1401 
1402 /* Event enqueue operations */
1403 #define RTE_EVENT_OP_NEW                0
1404 /**< The event producers use this operation to inject a new event to the
1405  * event device.
1406  */
1407 #define RTE_EVENT_OP_FORWARD            1
1408 /**< The CPU uses this operation to forward the event to a different event queue or
1409  * change it to a new application-specific flow or schedule type to enable
1410  * pipelining.
1411  *
1412  * This operation must only be enqueued to the same port that the
1413  * event to be forwarded was dequeued from.
1414  */
1415 #define RTE_EVENT_OP_RELEASE            2
1416 /**< Release the flow context associated with the schedule type.
1417  *
1418  * If the current flow's schedule type is *RTE_SCHED_TYPE_ATOMIC*
1419  * then this function hints the scheduler that the user has completed critical
1420  * section processing in the current atomic context.
1421  * The scheduler is now allowed to schedule events from the same flow from
1422  * an event queue to another port. However, the context may still be held
1423  * until the next rte_event_dequeue_burst() call; this call allows, but does not
1424  * force, the scheduler to release the context early.
1425  *
1426  * Early atomic context release may increase parallelism and thus system
1427  * performance, but the user needs to design carefully the split into critical
1428  * vs non-critical sections.
1429  *
1430  * If the current flow's schedule type is *RTE_SCHED_TYPE_ORDERED*
1431  * then this function hints the scheduler that the user has done all that is needed
1432  * to maintain event order in the current ordered context.
1433  * The scheduler is allowed to release the ordered context of this port and
1434  * avoid reordering any following enqueues.
1435  *
1436  * Early ordered context release may increase parallelism and thus system
1437  * performance.
1438  *
1439  * If the current flow's schedule type is *RTE_SCHED_TYPE_PARALLEL*
1440  * or no scheduling context is held, then this function may be a no-op,
1441  * depending on the implementation.
1442  *
1443  * This operation must only be enqueued to the same port that the
1444  * event to be released was dequeued from.
1445  */
1446 
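/*
 * Illustrative sketch (not part of this API): releasing the scheduling context
 * held for a previously dequeued event without forwarding it. It assumes *ev*
 * was dequeued by the application from *port_id* of device *dev_id*.
 *
 *	struct rte_event rel = ev;
 *
 *	rel.op = RTE_EVENT_OP_RELEASE;
 *	(void)rte_event_enqueue_burst(dev_id, port_id, &rel, 1);
 */
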
1447 /**
1448  * The generic *rte_event* structure to hold the event attributes
1449  * for dequeue and enqueue operation
1450  */
1451 struct rte_event {
1452 	/** WORD0 */
1453 	union {
1454 		uint64_t event;
1455 		/** Event attributes for dequeue or enqueue operation */
1456 		struct {
1457 			uint32_t flow_id:20;
1458 			/**< Targeted flow identifier for the enqueue and
1459 			 * dequeue operation.
1460 			 * The value must be in the range of
1461 			 * [0, nb_event_queue_flows - 1] which was
1462 			 * previously supplied to rte_event_dev_configure().
1463 			 */
1464 			uint32_t sub_event_type:8;
1465 			/**< Sub-event types based on the event source.
1466 			 * @see RTE_EVENT_TYPE_CPU
1467 			 */
1468 			uint32_t event_type:4;
1469 			/**< Event type to classify the event source.
1470 			 * @see RTE_EVENT_TYPE_ETHDEV, (RTE_EVENT_TYPE_*)
1471 			 */
1472 			uint8_t op:2;
1473 			/**< The type of event enqueue operation - new/forward/
1474 			 * etc. This field is not preserved across an instance
1475 			 * and is undefined on dequeue.
1476 			 * @see RTE_EVENT_OP_NEW, (RTE_EVENT_OP_*)
1477 			 */
1478 			uint8_t rsvd:4;
1479 			/**< Reserved for future use */
1480 			uint8_t sched_type:2;
1481 			/**< Scheduler synchronization type (RTE_SCHED_TYPE_*)
1482 			 * associated with flow id on a given event queue
1483 			 * for the enqueue and dequeue operation.
1484 			 */
1485 			uint8_t queue_id;
1486 			/**< Targeted event queue identifier for the enqueue or
1487 			 * dequeue operation.
1488 			 * The value must be in the range of
1489 			 * [0, nb_event_queues - 1] which was previously supplied to
1490 			 * rte_event_dev_configure().
1491 			 */
1492 			uint8_t priority;
1493 			/**< Event priority relative to other events in the
1494 			 * event queue. The requested priority should be in the
1495 			 * range of [RTE_EVENT_DEV_PRIORITY_HIGHEST,
1496 			 * RTE_EVENT_DEV_PRIORITY_LOWEST].
1497 			 * The implementation shall normalize the requested
1498 			 * priority to a supported priority value.
1499 			 * Valid when the device has
1500 			 * RTE_EVENT_DEV_CAP_EVENT_QOS capability.
1501 			 */
1502 			uint8_t impl_opaque;
1503 			/**< Implementation specific opaque value.
1504 			 * An implementation may use this field to hold
1505 			 * implementation specific value to share between
1506 			 * dequeue and enqueue operation.
1507 			 * The application should not modify this field.
1508 			 */
1509 		};
1510 	};
1511 	/** WORD1 */
1512 	union {
1513 		uint64_t u64;
1514 		/**< Opaque 64-bit value */
1515 		void *event_ptr;
1516 		/**< Opaque event pointer */
1517 		struct rte_mbuf *mbuf;
1518 		/**< mbuf pointer if dequeued event is associated with mbuf */
1519 		struct rte_event_vector *vec;
1520 		/**< Event vector pointer. */
1521 	};
1522 };
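
/*
 * Illustrative sketch (not part of this API): filling in a struct rte_event to
 * inject a new CPU-generated event carrying an mbuf *m*. The queue id, flow id
 * derivation and schedule type are arbitrary application choices.
 *
 *	struct rte_event ev = {0};
 *
 *	ev.op = RTE_EVENT_OP_NEW;
 *	ev.queue_id = 0;
 *	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	ev.event_type = RTE_EVENT_TYPE_CPU;
 *	ev.flow_id = m->hash.rss & 0xFFFFF;
 *	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *	ev.mbuf = m;
 */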
1523 
1524 /* Ethdev Rx adapter capability bitmap flags */
1525 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT	0x1
1526 /**< This flag is set when the packet transfer mechanism is in HW.
1527  * Ethdev can send packets to the event device using internal event port.
1528  */
1529 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ	0x2
1530 /**< Adapter supports multiple event queues per ethdev. Every ethdev
1531  * Rx queue can be connected to a unique event queue.
1532  */
1533 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
1534 /**< The application can override the adapter generated flow ID in the
1535  * event. This flow ID can be specified when adding an ethdev Rx queue
1536  * to the adapter using the ev.flow_id member.
1537  * @see struct rte_event_eth_rx_adapter_queue_conf::ev
1538  * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
1539  */
1540 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR	0x8
1541 /**< Adapter supports event vectorization per ethdev. */
1542 
1543 /**
1544  * Retrieve the event device's ethdev Rx adapter capabilities for the
1545  * specified ethernet port
1546  *
1547  * @param dev_id
1548  *   The identifier of the device.
1549  *
1550  * @param eth_port_id
1551  *   The identifier of the ethernet device.
1552  *
1553  * @param[out] caps
1554  *   A pointer to memory filled with Rx event adapter capabilities.
1555  *
1556  * @return
1557  *   - 0: Success, driver provides Rx event adapter capabilities for the
1558  *	ethernet device.
1559  *   - <0: Error code returned by the driver function.
1560  */
1561 int
1562 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1563 				uint32_t *caps);
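
/*
 * Illustrative sketch (not part of this API): checking whether the Rx adapter
 * for ethdev port *eth_port* can use an internal event port. When the flag is
 * not set, packets are transferred by a software service instead.
 *
 *	uint32_t caps = 0;
 *	int hw_rx_port = 0;
 *
 *	if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port, &caps) == 0)
 *		hw_rx_port = !!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
 */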
1564 
1565 #define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
1566 /**< This flag is set when the timer mechanism is in HW. */
1567 
1568 #define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC      (1ULL << 1)
1569 /**< This flag is set if periodic mode is supported. */
1570 
1571 /**
1572  * Retrieve the event device's timer adapter capabilities.
1573  *
1574  * @param dev_id
1575  *   The identifier of the device.
1576  *
1577  * @param[out] caps
1578  *   A pointer to memory to be filled with event timer adapter capabilities.
1579  *
1580  * @return
1581  *   - 0: Success, driver provided event timer adapter capabilities.
1582  *   - <0: Error code returned by the driver function.
1583  */
1584 int
1585 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
1586 
1587 /* Crypto adapter capability bitmap flag */
1588 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW   0x1
1589 /**< Flag indicates HW is capable of generating events in
1590  * RTE_EVENT_OP_NEW enqueue operation. Cryptodev will send
1591  * packets to the event device as new events using an internal
1592  * event port.
1593  */
1594 
1595 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD   0x2
1596 /**< Flag indicates HW is capable of generating events in
1597  * RTE_EVENT_OP_FORWARD enqueue operation. Cryptodev will send
1598  * packets to the event device as forwarded event using an
1599  * internal event port.
1600  */
1601 
1602 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND  0x4
1603 /**< Flag indicates HW is capable of mapping crypto queue pair to
1604  * event queue.
1605  */
1606 
1607 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA   0x8
1608 /**< Flag indicates HW/SW supports a mechanism to store and retrieve
1609  * the private data information along with the crypto session.
1610  */
1611 
1612 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR   0x10
1613 /**< Flag indicates HW is capable of aggregating processed
1614  * crypto operations into rte_event_vector.
1615  */
1616 
1617 /**
1618  * Retrieve the event device's crypto adapter capabilities for the
1619  * specified cryptodev device
1620  *
1621  * @param dev_id
1622  *   The identifier of the device.
1623  *
1624  * @param cdev_id
1625  *   The identifier of the cryptodev device.
1626  *
1627  * @param[out] caps
1628  *   A pointer to memory filled with event adapter capabilities.
1629  *   It is expected to be pre-allocated & initialized by caller.
1630  *
1631  * @return
1632  *   - 0: Success, driver provides event adapter capabilities for the
1633  *     cryptodev device.
1634  *   - <0: Error code returned by the driver function.
1635  */
1636 int
1637 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
1638 				  uint32_t *caps);
1639 
1640 /* DMA adapter capability bitmap flag */
1641 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
1642 /**< Flag indicates HW is capable of generating events in
1643  * RTE_EVENT_OP_NEW enqueue operation. DMADEV will send
1644  * packets to the event device as new events using an
1645  * internal event port.
1646  */
1647 
1648 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
1649 /**< Flag indicates HW is capable of generating events in
1650  * RTE_EVENT_OP_FORWARD enqueue operation. DMADEV will send
1651  * packets to the event device as forwarded event using an
1652  * internal event port.
1653  */
1654 
1655 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4
1656 /**< Flag indicates HW is capable of mapping DMA vchan to event queue. */
1657 
1658 /**
1659  * Retrieve the event device's DMA adapter capabilities for the
1660  * specified dmadev device
1661  *
1662  * @param dev_id
1663  *   The identifier of the device.
1664  *
1665  * @param dmadev_id
1666  *   The identifier of the dmadev device.
1667  *
1668  * @param[out] caps
1669  *   A pointer to memory filled with event adapter capabilities.
1670  *   It is expected to be pre-allocated & initialized by caller.
1671  *
1672  * @return
1673  *   - 0: Success, driver provides event adapter capabilities for the
1674  *     dmadev device.
1675  *   - <0: Error code returned by the driver function.
1676  *
1677  */
1678 __rte_experimental
1679 int
1680 rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);
1681 
1682 /* Ethdev Tx adapter capability bitmap flags */
1683 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT	0x1
1684 /**< This flag is set when the PMD supports a packet transmit callback.
1685  */
1686 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR	0x2
1687 /**< Indicates that the Tx adapter is capable of handling event vector of
1688  * mbufs.
1689  */
1690 
1691 /**
1692  * Retrieve the event device's eth Tx adapter capabilities
1693  *
1694  * @param dev_id
1695  *   The identifier of the device.
1696  *
1697  * @param eth_port_id
1698  *   The identifier of the ethernet device.
1699  *
1700  * @param[out] caps
1701  *   A pointer to memory filled with eth Tx adapter capabilities.
1702  *
1703  * @return
1704  *   - 0: Success, driver provides eth Tx adapter capabilities.
1705  *   - <0: Error code returned by the driver function.
1706  */
1707 int
1708 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1709 				uint32_t *caps);
1710 
1711 /**
1712  * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
1713  *
1714  * If the device is configured with the RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
1715  * flag, then the application can use this function to convert a timeout value
1716  * in nanoseconds to an implementation-specific timeout value to be supplied to
1717  * rte_event_dequeue_burst().
1718  *
1719  * @param dev_id
1720  *   The identifier of the device.
1721  * @param ns
1722  *   Wait time in nanoseconds.
1723  * @param[out] timeout_ticks
1724  *   Value for the *timeout_ticks* parameter in rte_event_dequeue_burst()
1725  *
1726  * @return
1727  *  - 0 on success.
1728  *  - -ENOTSUP if the device doesn't support timeouts
1729  *  - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
1730  *  - other values < 0 on failure.
1731  *
1732  * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
1733  * @see rte_event_dev_configure()
1734  */
1735 int
1736 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1737 					uint64_t *timeout_ticks);
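
/*
 * Illustrative sketch (not part of this API): converting a 100 microsecond
 * wait into the device-specific tick value used by rte_event_dequeue_burst().
 * On failure the sketch simply falls back to a no-wait dequeue.
 *
 *	uint64_t timeout_ticks = 0;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &timeout_ticks) < 0)
 *		timeout_ticks = 0;
 */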
1738 
1739 /**
1740  * Link multiple source event queues supplied in *queues* to the destination
1741  * event port designated by its *port_id* with associated service priority
1742  * supplied in *priorities* on the event device designated by its *dev_id*.
1743  *
1744  * The link establishment shall enable the event port *port_id* to
1745  * receive events from the specified event queue(s) supplied in *queues*.
1746  *
1747  * An event queue may link to one or more event ports.
1748  * The number of links that can be established from an event queue to an event port is
1749  * implementation defined.
1750  *
1751  * Event queue(s) to event port link establishment can be changed at runtime
1752  * without re-configuring the device to support scaling and to reduce the
1753  * latency of critical work by establishing the link with more event ports
1754  * at runtime.
1755  *
1756  * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
1757  * than or equal to one, this function links the event queues to the default
1758  * profile_id i.e. profile_id 0 of the event port.
1759  *
1760  * @param dev_id
1761  *   The identifier of the device.
1762  *
1763  * @param port_id
1764  *   Event port identifier to select the destination port to link.
1765  *
1766  * @param queues
1767  *   Points to an array of *nb_links* event queues to be linked
1768  *   to the event port.
1769  *   NULL value is allowed, in which case this function links all the configured
1770  *   event queues *nb_event_queues* which were previously supplied to
1771  *   rte_event_dev_configure() to the event port *port_id*.
1772  *
1773  * @param priorities
1774  *   Points to an array of *nb_links* service priorities associated with each
1775  *   event queue link to event port.
1776  *   The priority defines the event port's servicing priority for
1777  *   event queue, which may be ignored by an implementation.
1778  *   The requested priority should be in the range of
1779  *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
1780  *   The implementation shall normalize the requested priority to
1781  *   implementation supported priority value.
1782  *   NULL value is allowed, in which case this function links the event queues
1783  *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
1784  *
1785  * @param nb_links
1786  *   The number of links to establish. This parameter is ignored if queues is
1787  *   NULL.
1788  *
1789  * @return
1790  * The number of links actually established. The return value can be less than
1791  * the value of the *nb_links* parameter when the implementation has the
1792  * limitation on specific queue to port link establishment or if invalid
1793  * parameters are specified in *queues*
1794  * If the return value is less than *nb_links*, the remaining links at the end
1795  * of queues[] are not established, and the caller has to take care of them.
1796  * If the return value is less than *nb_links* then the implementation shall
1797  * update rte_errno accordingly. Possible rte_errno values are:
1798  * (EDQUOT) Quota exceeded (the application tried to link a queue configured
1799  *  with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
1800  * (EINVAL) Invalid parameter
1801  */
1802 int
1803 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
1804 		    const uint8_t queues[], const uint8_t priorities[],
1805 		    uint16_t nb_links);
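
/*
 * Illustrative sketch (not part of this API): linking two event queues to one
 * port with different service priorities. The queue and port identifiers are
 * arbitrary and assumed to have been configured beforehand.
 *
 *	const uint8_t queues[] = { 0, 1 };
 *	const uint8_t prios[] = { RTE_EVENT_DEV_PRIORITY_HIGHEST,
 *				  RTE_EVENT_DEV_PRIORITY_NORMAL };
 *
 *	if (rte_event_port_link(dev_id, port_id, queues, prios, 2) != 2)
 *		rte_panic("queue to port link failed: %d\n", rte_errno);
 */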
1806 
1807 /**
1808  * Unlink multiple source event queues supplied in *queues* from the destination
1809  * event port designated by its *port_id* on the event device designated
1810  * by its *dev_id*.
1811  *
1812  * The unlink call issues an async request to disable the event port *port_id*
1813  * from receiving events from the specified event queue(s) supplied in *queues*.
1814  * Event queue(s) to event port unlink establishment can be changed at runtime
1815  * without re-configuring the device.
1816  *
1817  * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
1818  * than or equal to one, this function unlinks the event queues from the default
1819  * profile identifier i.e. profile 0 of the event port.
1820  *
1821  * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
1822  *
1823  * @param dev_id
1824  *   The identifier of the device.
1825  *
1826  * @param port_id
1827  *   Event port identifier to select the destination port to unlink.
1828  *
1829  * @param queues
1830  *   Points to an array of *nb_unlinks* event queues to be unlinked
1831  *   from the event port.
1832  *   NULL value is allowed, in which case this function unlinks all the
1833  *   event queue(s) from the event port *port_id*.
1834  *
1835  * @param nb_unlinks
1836  *   The number of unlinks to perform. This parameter is ignored if queues is
1837  *   NULL.
1838  *
1839  * @return
1840  * The number of unlinks successfully requested. The return value can be less
1841  * than the value of the *nb_unlinks* parameter when the implementation has the
1842  * limitation on specific queue to port unlink establishment or
1843  * if invalid parameters are specified.
1844  * If the return value is less than *nb_unlinks*, the remaining queues at the
1845  * end of queues[] are not unlinked, and the caller has to take care of them.
1846  * If the return value is less than *nb_unlinks* then the implementation shall
1847  * update rte_errno accordingly. Possible rte_errno values are:
1848  * (EINVAL) Invalid parameter
1849  */
1850 int
1851 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
1852 		      uint8_t queues[], uint16_t nb_unlinks);
1853 
1854 /**
1855  * Link multiple source event queues supplied in *queues* to the destination
1856  * event port designated by its *port_id* with associated profile identifier
1857  * supplied in *profile_id* with service priorities supplied in *priorities*
1858  * on the event device designated by its *dev_id*.
1859  *
1860  * If *profile_id* is set to 0, then the links created by the call `rte_event_port_link`
1861  * will be overwritten.
1862  *
1863  * Event ports by default use profile_id 0 unless it is changed using the
1864  * call ``rte_event_port_profile_switch()``.
1865  *
1866  * The link establishment shall enable the event port *port_id* to
1867  * receive events from the specified event queue(s) supplied in *queues*.
1868  *
1869  * An event queue may link to one or more event ports.
1870  * The number of links that can be established from an event queue to an event port is
1871  * implementation defined.
1872  *
1873  * Event queue(s) to event port link establishment can be changed at runtime
1874  * without re-configuring the device to support scaling and to reduce the
1875  * latency of critical work by establishing the link with more event ports
1876  * at runtime.
1877  *
1878  * @param dev_id
1879  *   The identifier of the device.
1880  *
1881  * @param port_id
1882  *   Event port identifier to select the destination port to link.
1883  *
1884  * @param queues
1885  *   Points to an array of *nb_links* event queues to be linked
1886  *   to the event port.
1887  *   NULL value is allowed, in which case this function links all the configured
1888  *   event queues *nb_event_queues* which were previously supplied to
1889  *   rte_event_dev_configure() to the event port *port_id*.
1890  *
1891  * @param priorities
1892  *   Points to an array of *nb_links* service priorities associated with each
1893  *   event queue link to event port.
1894  *   The priority defines the event port's servicing priority for
1895  *   event queue, which may be ignored by an implementation.
1896  *   The requested priority should be in the range of
1897  *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
1898  *   The implementation shall normalize the requested priority to
1899  *   implementation supported priority value.
1900  *   NULL value is allowed, in which case this function links the event queues
1901  *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
1902  *
1903  * @param nb_links
1904  *   The number of links to establish. This parameter is ignored if queues is
1905  *   NULL.
1906  *
1907  * @param profile_id
1908  *   The profile identifier associated with the links between event queues and
1909  *   event port. Should be less than the max capability reported by
1910  *   ``rte_event_dev_info::max_profiles_per_port``
1911  *
1912  * @return
1913  * The number of links actually established. The return value can be less than
1914  * the value of the *nb_links* parameter when the implementation has the
1915  * limitation on specific queue to port link establishment or if invalid
1916  * parameters are specified in *queues*
1917  * If the return value is less than *nb_links*, the remaining links at the end
1918  * of queues[] are not established, and the caller has to take care of them.
1919  * If the return value is less than *nb_links* then the implementation shall
1920  * update rte_errno accordingly. Possible rte_errno values are:
1921  * (EDQUOT) Quota exceeded (the application tried to link a queue configured
1922  *  with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
1923  * (EINVAL) Invalid parameter
1924  *
1925  */
1926 __rte_experimental
1927 int
1928 rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
1929 				 const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);
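
/*
 * Illustrative sketch (not part of this API): configuring two link profiles on
 * one port and switching between them. It assumes the device reports
 * rte_event_dev_info::max_profiles_per_port >= 2; the switch would normally be
 * done later from the datapath thread. All identifiers are arbitrary.
 *
 *	uint8_t q0 = 0, q1 = 1;
 *
 *	rte_event_port_profile_links_set(dev_id, port_id, &q0, NULL, 1, 0);
 *	rte_event_port_profile_links_set(dev_id, port_id, &q1, NULL, 1, 1);
 *
 *	rte_event_port_profile_switch(dev_id, port_id, 1);
 */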
1930 
1931 /**
1932  * Unlink multiple source event queues supplied in *queues* that belong to profile
1933  * designated by *profile_id* from the destination event port designated by its
1934  * *port_id* on the event device designated by its *dev_id*.
1935  *
1936  * If *profile_id* is set to 0, i.e. the default profile, then this function
1937  * will act as ``rte_event_port_unlink``.
1938  *
1939  * The unlink call issues an async request to disable the event port *port_id*
1940  * from receiving events from the specified event queue(s) supplied in *queues*.
1941  * Event queue(s) to event port unlink establishment can be changed at runtime
1942  * without re-configuring the device.
1943  *
1944  * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
1945  *
1946  * @param dev_id
1947  *   The identifier of the device.
1948  *
1949  * @param port_id
1950  *   Event port identifier to select the destination port to unlink.
1951  *
1952  * @param queues
1953  *   Points to an array of *nb_unlinks* event queues to be unlinked
1954  *   from the event port.
1955  *   NULL value is allowed, in which case this function unlinks all the
1956  *   event queue(s) from the event port *port_id*.
1957  *
1958  * @param nb_unlinks
1959  *   The number of unlinks to perform. This parameter is ignored if queues is
1960  *   NULL.
1961  *
1962  * @param profile_id
1963  *   The profile identifier associated with the links between event queues and
1964  *   event port. Should be less than the max capability reported by
1965  *   ``rte_event_dev_info::max_profiles_per_port``
1966  *
1967  * @return
1968  * The number of unlinks successfully requested. The return value can be less
1969  * than the value of the *nb_unlinks* parameter when the implementation has the
1970  * limitation on specific queue to port unlink establishment or
1971  * if invalid parameters are specified.
1972  * If the return value is less than *nb_unlinks*, the remaining queues at the
1973  * end of queues[] are not unlinked, and the caller has to take care of them.
1974  * If the return value is less than *nb_unlinks* then the implementation shall
1975  * update rte_errno accordingly. Possible rte_errno values are:
1976  * (EINVAL) Invalid parameter
1977  *
1978  */
1979 __rte_experimental
1980 int
1981 rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
1982 			      uint16_t nb_unlinks, uint8_t profile_id);
1983 
1984 /**
1985  * Returns the number of unlinks in progress.
1986  *
1987  * This function provides the application with a method to detect when an
1988  * unlink has been completed by the implementation.
1989  *
1990  * @see rte_event_port_unlink() to issue unlink requests.
1991  *
1992  * @param dev_id
1993  *   The identifier of the device.
1994  *
1995  * @param port_id
1996  *   Event port identifier to select port to check for unlinks in progress.
1997  *
1998  * @return
1999  * The number of unlinks that are in progress. A return of zero indicates that
2000  * there are no outstanding unlink requests. A positive return value indicates
2001  * the number of unlinks that are in progress, but are not yet complete.
2002  * A negative return value indicates an error, -EINVAL indicates an invalid
2003  * parameter passed for *dev_id* or *port_id*.
2004  */
2005 int
2006 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);
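
/*
 * Illustrative sketch (not part of this API): unlinking a queue and waiting
 * for the unlink to take effect before reconfiguring the queue.
 *
 *	uint8_t q = 0;
 *
 *	if (rte_event_port_unlink(dev_id, port_id, &q, 1) == 1) {
 *		while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *			rte_pause();
 *	}
 */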
2007 
2008 /**
2009  * Retrieve the list of source event queues and their associated service priorities
2010  * linked to the destination event port designated by its *port_id*
2011  * on the event device designated by its *dev_id*.
2012  *
2013  * @param dev_id
2014  *   The identifier of the device.
2015  *
2016  * @param port_id
2017  *   Event port identifier.
2018  *
2019  * @param[out] queues
2020  *   Points to an array of *queues* for output.
2021  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
2022  *   store the event queue(s) linked with event port *port_id*
2023  *
2024  * @param[out] priorities
2025  *   Points to an array of *priorities* for output.
2026  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
2027  *   store the service priority associated with each event queue linked
2028  *
2029  * @return
2030  * The number of links established on the event port designated by its
2031  *  *port_id*.
2032  * - <0 on failure.
2033  */
2034 int
2035 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
2036 			 uint8_t queues[], uint8_t priorities[]);
2037 
2038 /**
2039  * Retrieve the list of source event queues and their service priorities
2040  * associated to a *profile_id* and linked to the destination event port
2041  * designated by its *port_id* on the event device designated by its *dev_id*.
2042  *
2043  * @param dev_id
2044  *   The identifier of the device.
2045  *
2046  * @param port_id
2047  *   Event port identifier.
2048  *
2049  * @param[out] queues
2050  *   Points to an array of *queues* for output.
2051  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
2052  *   store the event queue(s) linked with event port *port_id*
2053  *
2054  * @param[out] priorities
2055  *   Points to an array of *priorities* for output.
2056  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
2057  *   store the service priority associated with each event queue linked
2058  *
2059  * @param profile_id
2060  *   The profile identifier associated with the links between event queues and
2061  *   event port. Should be less than the max capability reported by
2062  *   ``rte_event_dev_info::max_profiles_per_port``
2063  *
2064  * @return
2065  * The number of links established on the event port designated by its
2066  *  *port_id*.
2067  * - <0 on failure.
2068  */
2069 __rte_experimental
2070 int
2071 rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
2072 				 uint8_t priorities[], uint8_t profile_id);
2073 
2074 /**
2075  * Retrieve the service ID of the event dev. If the event dev doesn't use
2076  * an rte_service function, this function returns -ESRCH.
2077  *
2078  * @param dev_id
2079  *   The identifier of the device.
2080  *
2081  * @param [out] service_id
2082  *   A pointer to a uint32_t, to be filled in with the service id.
2083  *
2084  * @return
2085  *   - 0: Success
2086  *   - <0: Error code on failure, if the event dev doesn't use a rte_service
2087  *   function, this function returns -ESRCH.
2088  */
2089 int
2090 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
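
/*
 * Illustrative sketch (not part of this API): when the event device relies on
 * a software scheduler service, run it on a dedicated service lcore.
 * *service_lcore* is an application-chosen lcore id assumed to be available.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		rte_service_lcore_add(service_lcore);
 *		rte_service_map_lcore_set(service_id, service_lcore, 1);
 *		rte_service_runstate_set(service_id, 1);
 *		rte_service_lcore_start(service_lcore);
 *	}
 */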
2091 
2092 /**
2093  * Dump internal information about *dev_id* to the FILE* provided in *f*.
2094  *
2095  * @param dev_id
2096  *   The identifier of the device.
2097  *
2098  * @param f
2099  *   A pointer to a file for output
2100  *
2101  * @return
2102  *   - 0: on success
2103  *   - <0: on failure.
2104  */
2105 int
2106 rte_event_dev_dump(uint8_t dev_id, FILE *f);
2107 
2108 /** Maximum name length for extended statistics counters */
2109 #define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
2110 
2111 /**
2112  * Selects the component of the eventdev to retrieve statistics from.
2113  */
2114 enum rte_event_dev_xstats_mode {
2115 	RTE_EVENT_DEV_XSTATS_DEVICE,
2116 	RTE_EVENT_DEV_XSTATS_PORT,
2117 	RTE_EVENT_DEV_XSTATS_QUEUE,
2118 };
2119 
2120 /**
2121  * A name-key lookup element for extended statistics.
2122  *
2123  * This structure is used to map between names and ID numbers
2124  * for extended eventdev statistics.
2125  */
2126 struct rte_event_dev_xstats_name {
2127 	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
2128 };
2129 
2130 /**
2131  * Retrieve names of extended statistics of an event device.
2132  *
2133  * @param dev_id
2134  *   The identifier of the event device.
2135  * @param mode
2136  *   The mode of statistics to retrieve. Choices include the device statistics,
2137  *   port statistics or queue statistics.
2138  * @param queue_port_id
2139  *   Used to specify the port or queue number in queue or port mode, and is
2140  *   ignored in device mode.
2141  * @param[out] xstats_names
2142  *   Block of memory to insert names into. Must be at least size in capacity.
2143  *   If set to NULL, function returns required capacity.
2144  * @param[out] ids
2145  *   Block of memory to insert ids into. Must be at least size in capacity.
2146  *   If set to NULL, function returns required capacity. The id values returned
2147  *   can be passed to *rte_event_dev_xstats_get* to select statistics.
2148  * @param size
2149  *   Capacity of xstats_names (number of names).
2150  * @return
2151  *   - positive value lower or equal to size: success. The return value
2152  *     is the number of entries filled in the stats table.
2153  *   - positive value higher than size: error, the given statistics table
2154  *     is too small. The return value corresponds to the size that should
2155  *     be given to succeed. The entries in the table are not valid and
2156  *     shall not be used by the caller.
2157  *   - negative value on error:
2158  *        -ENODEV for invalid *dev_id*
2159  *        -EINVAL for invalid mode, queue port or id parameters
2160  *        -ENOTSUP if the device doesn't support this function.
2161  */
2162 int
2163 rte_event_dev_xstats_names_get(uint8_t dev_id,
2164 			       enum rte_event_dev_xstats_mode mode,
2165 			       uint8_t queue_port_id,
2166 			       struct rte_event_dev_xstats_name *xstats_names,
2167 			       uint64_t *ids,
2168 			       unsigned int size);
2169 
2170 /**
2171  * Retrieve extended statistics of an event device.
2172  *
2173  * @param dev_id
2174  *   The identifier of the device.
2175  * @param mode
2176  *  The mode of statistics to retrieve. Choices include the device statistics,
2177  *  port statistics or queue statistics.
2178  * @param queue_port_id
2179  *   Used to specify the port or queue number in queue or port mode, and is
2180  *   ignored in device mode.
2181  * @param ids
2182  *   The id numbers of the stats to get. The ids can be obtained from the stat
2183  *   position in the stat list from rte_event_dev_xstats_names_get(), or
2184  *   by using rte_event_dev_xstats_by_name_get().
2185  * @param[out] values
2186  *   The values for each stats request by ID.
2187  * @param n
2188  *   The number of stats requested
2189  * @return
2190  *   - positive value: number of stat entries filled into the values array
2191  *   - negative value on error:
2192  *        -ENODEV for invalid *dev_id*
2193  *        -EINVAL for invalid mode, queue port or id parameters
2194  *        -ENOTSUP if the device doesn't support this function.
2195  */
2196 int
2197 rte_event_dev_xstats_get(uint8_t dev_id,
2198 			 enum rte_event_dev_xstats_mode mode,
2199 			 uint8_t queue_port_id,
2200 			 const uint64_t ids[],
2201 			 uint64_t values[], unsigned int n);
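
/*
 * Illustrative sketch (not part of this API) of the usual two-step xstats
 * retrieval: first query the required table size with NULL buffers, then fetch
 * the names, ids and values. Error handling and freeing are omitted.
 *
 *	int num = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *	struct rte_event_dev_xstats_name *names = calloc(num, sizeof(*names));
 *	uint64_t *ids = calloc(num, sizeof(*ids));
 *	uint64_t *values = calloc(num, sizeof(*values));
 *
 *	rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				       names, ids, num);
 *	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				 ids, values, num);
 */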
2202 
2203 /**
2204  * Retrieve the value of a single stat by requesting it by name.
2205  *
2206  * @param dev_id
2207  *   The identifier of the device
2208  * @param name
2209  *   The stat name to retrieve
2210  * @param[out] id
2211  *   If non-NULL, the numerical id of the stat will be returned, so that further
2212  *   requests for the stat can be made using rte_event_dev_xstats_get(), which will
2213  *   be faster as it doesn't need to scan a list of names for the stat.
2214  *   If the stat cannot be found, the id returned will be (unsigned)-1.
2215  * @return
2216  *   - positive value or zero: the stat value
2217  *   - negative value: -EINVAL if stat not found, -ENOTSUP if not supported.
2218  */
2219 uint64_t
2220 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
2221 				 uint64_t *id);
2222 
2223 /**
2224  * Reset the values of the xstats of the selected component in the device.
2225  *
2226  * @param dev_id
2227  *   The identifier of the device
2228  * @param mode
2229  *   The mode of the statistics to reset. Choose from device, queue or port.
2230  * @param queue_port_id
2231  *   The queue or port to reset. 0 and positive values select ports and queues,
2232  *   while -1 indicates all ports or queues.
2233  * @param ids
2234  *   Selects specific statistics to be reset. When NULL, all statistics selected
2235  *   by *mode* will be reset. If non-NULL, must point to array of at least
2236  *   *nb_ids* size.
2237  * @param nb_ids
2238  *   The number of ids available from the *ids* array. Ignored when ids is NULL.
2239  * @return
2240  *   - zero: successfully reset the statistics to zero
2241  *   - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported.
2242  */
2243 int
2244 rte_event_dev_xstats_reset(uint8_t dev_id,
2245 			   enum rte_event_dev_xstats_mode mode,
2246 			   int16_t queue_port_id,
2247 			   const uint64_t ids[],
2248 			   uint32_t nb_ids);
2249 
2250 /**
2251  * Trigger the eventdev self test.
2252  *
2253  * @param dev_id
2254  *   The identifier of the device
2255  * @return
2256  *   - 0: Selftest successful
2257  *   - -ENOTSUP if the device doesn't support selftest
2258  *   - other values < 0 on failure.
2259  */
2260 int rte_event_dev_selftest(uint8_t dev_id);
2261 
2262 /**
2263  * Get the memory required per event vector based on the number of elements per
2264  * vector.
2265  * This should be used to create the mempool that holds the event vectors.
2266  *
2267  * @param name
2268  *   The name of the vector pool.
2269  * @param n
2270  *   The number of elements (event vectors) in the pool.
2271  * @param cache_size
2272  *   Size of the per-core object cache. See rte_mempool_create() for
2273  *   details.
2274  * @param nb_elem
2275  *   The number of elements that a single event vector should be able to hold.
2276  * @param socket_id
2277  *   The socket identifier where the memory should be allocated. The
2278  *   value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
2279  *   reserved zone
2280  *
2281  * @return
2282  *   The pointer to the newly allocated mempool, on success. NULL on error
2283  *   with rte_errno set appropriately. Possible rte_errno values include:
2284  *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
2285  *    - E_RTE_SECONDARY - function was called from a secondary process instance
2286  *    - EINVAL - cache size provided is too large, or priv_size is not aligned.
2287  *    - ENOSPC - the maximum number of memzones has already been allocated
2288  *    - EEXIST - a memzone with the same name already exists
2289  *    - ENOMEM - no appropriate memory area found in which to create memzone
2290  *    - ENAMETOOLONG - mempool name requested is too long.
2291  */
2292 struct rte_mempool *
2293 rte_event_vector_pool_create(const char *name, unsigned int n,
2294 			     unsigned int cache_size, uint16_t nb_elem,
2295 			     int socket_id);
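
/*
 * Illustrative sketch (not part of this API): creating a pool of 4096 event
 * vectors, each able to carry up to 64 elements, with a small per-lcore cache.
 * All sizes are arbitrary application choices.
 *
 *	struct rte_mempool *vec_pool;
 *
 *	vec_pool = rte_event_vector_pool_create("ev_vec_pool", 4096, 32, 64,
 *						rte_socket_id());
 *	if (vec_pool == NULL)
 *		rte_panic("cannot create event vector pool: %d\n", rte_errno);
 */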
2296 
2297 #include <rte_eventdev_core.h>
2298 
2299 static __rte_always_inline uint16_t
2300 __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2301 			  const struct rte_event ev[], uint16_t nb_events,
2302 			  const event_enqueue_burst_t fn)
2303 {
2304 	const struct rte_event_fp_ops *fp_ops;
2305 	void *port;
2306 
2307 	fp_ops = &rte_event_fp_ops[dev_id];
2308 	port = fp_ops->data[port_id];
2309 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2310 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2311 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2312 		rte_errno = EINVAL;
2313 		return 0;
2314 	}
2315 
2316 	if (port == NULL) {
2317 		rte_errno = EINVAL;
2318 		return 0;
2319 	}
2320 #endif
2321 	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
2322 	/*
2323 	 * Allow zero-cost non-burst mode routine invocation if the application
2324 	 * requests nb_events as a constant value of one
2325 	 */
2326 	if (nb_events == 1)
2327 		return (fp_ops->enqueue)(port, ev);
2328 	else
2329 		return fn(port, ev, nb_events);
2330 }
2331 
2332 /**
2333  * Enqueue a burst of events objects or an event object supplied in *rte_event*
2334  * structure on an  event device designated by its *dev_id* through the event
2335  * port specified by *port_id*. Each event object specifies the event queue on
2336  * which it will be enqueued.
2337  *
2338  * The *nb_events* parameter is the number of event objects to enqueue which are
2339  * supplied in the *ev* array of *rte_event* structure.
2340  *
2341  * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
2342  * enqueued to the same port that their associated events were dequeued from.
2343  *
2344  * The rte_event_enqueue_burst() function returns the number of
2345  * events objects it actually enqueued. A return value equal to *nb_events*
2346  * means that all event objects have been enqueued.
2347  *
2348  * @param dev_id
2349  *   The identifier of the device.
2350  * @param port_id
2351  *   The identifier of the event port.
2352  * @param ev
2353  *   Points to an array of *nb_events* objects of type *rte_event* structure
2354  *   which contain the event object enqueue operations to be processed.
2355  * @param nb_events
2356  *   The number of event objects to enqueue, typically number of
2357  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2358  *   available for this port.
2359  *
2360  * @return
2361  *   The number of event objects actually enqueued on the event device. The
2362  *   return value can be less than the value of the *nb_events* parameter when
2363  *   the event device's queue is full or if invalid parameters are specified in a
2364  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2365  *   events at the end of ev[] are not consumed and the caller has to take care
2366  *   of them, and rte_errno is set accordingly. Possible errno values include:
2367  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2368  *              ID is invalid, or an event's sched type doesn't match the
2369  *              capabilities of the destination queue.
2370  *   - ENOSPC   The event port was backpressured and unable to enqueue
2371  *              one or more events. This error code is only applicable to
2372  *              closed systems.
2373  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2374  */
2375 static inline uint16_t
2376 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2377 			const struct rte_event ev[], uint16_t nb_events)
2378 {
2379 	const struct rte_event_fp_ops *fp_ops;
2380 
2381 	fp_ops = &rte_event_fp_ops[dev_id];
2382 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2383 					 fp_ops->enqueue_burst);
2384 }
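
/*
 * Illustrative sketch (not part of this API): enqueuing a prepared burst and
 * retrying the tail that was not accepted, e.g. on back-pressure. A real
 * application would also inspect rte_errno to avoid retrying invalid events.
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < n)
 *		sent += rte_event_enqueue_burst(dev_id, port_id,
 *						events + sent, n - sent);
 */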
2385 
2386 /**
2387  * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
2388  * an event device designated by its *dev_id* through the event port specified
2389  * by *port_id*.
2390  *
2391  * Provides the same functionality as rte_event_enqueue_burst(), except that
2392  * the application can use this API when all objects in the burst contain
2393  * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
2394  * function can provide the additional hint to the PMD and optimize if possible.
2395  *
2396  * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
2397  * has an event object of operation type != RTE_EVENT_OP_NEW.
2398  *
2399  * @param dev_id
2400  *   The identifier of the device.
2401  * @param port_id
2402  *   The identifier of the event port.
2403  * @param ev
2404  *   Points to an array of *nb_events* objects of type *rte_event* structure
2405  *   which contain the event object enqueue operations to be processed.
2406  * @param nb_events
2407  *   The number of event objects to enqueue, typically number of
2408  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2409  *   available for this port.
2410  *
2411  * @return
2412  *   The number of event objects actually enqueued on the event device. The
2413  *   return value can be less than the value of the *nb_events* parameter when
2414  *   the event device's queue is full or if invalid parameters are specified in a
2415  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2416  *   events at the end of ev[] are not consumed and the caller has to take care
2417  *   of them, and rte_errno is set accordingly. Possible errno values include:
2418  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2419  *              ID is invalid, or an event's sched type doesn't match the
2420  *              capabilities of the destination queue.
2421  *   - ENOSPC   The event port was backpressured and unable to enqueue
2422  *              one or more events. This error code is only applicable to
2423  *              closed systems.
2424  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2425  * @see rte_event_enqueue_burst()
2426  */
2427 static inline uint16_t
2428 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
2429 			    const struct rte_event ev[], uint16_t nb_events)
2430 {
2431 	const struct rte_event_fp_ops *fp_ops;
2432 
2433 	fp_ops = &rte_event_fp_ops[dev_id];
2434 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2435 					 fp_ops->enqueue_new_burst);
2436 }
2437 
2438 /**
2439  * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
2440  * on an event device designated by its *dev_id* through the event port
2441  * specified by *port_id*.
2442  *
2443  * Provides the same functionality as rte_event_enqueue_burst(), except that
2444  * the application can use this API when all objects in the burst contain
2445  * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
2446  * function can provide the additional hint to the PMD and optimize if possible.
2447  *
2448  * The rte_event_enqueue_forward_burst() result is undefined if the enqueue burst
2449  * has an event object of operation type != RTE_EVENT_OP_FORWARD.
2450  *
2451  * @param dev_id
2452  *   The identifier of the device.
2453  * @param port_id
2454  *   The identifier of the event port.
2455  * @param ev
2456  *   Points to an array of *nb_events* objects of type *rte_event* structure
2457  *   which contain the event object enqueue operations to be processed.
2458  * @param nb_events
2459  *   The number of event objects to enqueue, typically number of
2460  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2461  *   available for this port.
2462  *
2463  * @return
2464  *   The number of event objects actually enqueued on the event device. The
2465  *   return value can be less than the value of the *nb_events* parameter when
2466  *   the event device's queue is full or if invalid parameters are specified in a
2467  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2468  *   events at the end of ev[] are not consumed and the caller has to take care
2469  *   of them, and rte_errno is set accordingly. Possible errno values include:
2470  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2471  *              ID is invalid, or an event's sched type doesn't match the
2472  *              capabilities of the destination queue.
2473  *   - ENOSPC   The event port was backpressured and unable to enqueue
2474  *              one or more events. This error code is only applicable to
2475  *              closed systems.
2476  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2477  * @see rte_event_enqueue_burst()
2478  */
2479 static inline uint16_t
2480 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
2481 				const struct rte_event ev[], uint16_t nb_events)
2482 {
2483 	const struct rte_event_fp_ops *fp_ops;
2484 
2485 	fp_ops = &rte_event_fp_ops[dev_id];
2486 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2487 					 fp_ops->enqueue_forward_burst);
2488 }
2489 
2490 /**
2491  * Dequeue a burst of events objects or an event object from the event port
2492  * designated by its *event_port_id*, on an event device designated
2493  * by its *dev_id*.
2494  *
2495  * rte_event_dequeue_burst() does not dictate the specifics of the scheduling
2496  * algorithm, as each eventdev driver may have different criteria to schedule
2497  * an event. However, in general, from an application perspective the scheduler may
2498  * use the following scheme to dispatch an event to the port.
2499  *
2500  * 1) Selection of event queue based on
2501  *   a) The list of event queues that are linked to the event port.
2502  *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
2503  *   queue selection from list is based on event queue priority relative to
2504  *   other event queue supplied as *priority* in rte_event_queue_setup()
2505  *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
2506  *   queue selection from the list is based on event priority supplied as
2507  *   *priority* in rte_event_enqueue_burst()
2508  * 2) Selection of event
2509  *   a) The number of flows available in selected event queue.
2510  *   b) Schedule type method associated with the event
2511  *
2512  * The *nb_events* parameter is the maximum number of event objects to dequeue
2513  * which are returned in the *ev* array of *rte_event* structure.
2514  *
2515  * The rte_event_dequeue_burst() function returns the number of events objects
2516  * it actually dequeued. A return value equal to *nb_events* means that all
2517  * event objects have been dequeued.
2518  *
2519  * The number of events dequeued is the number of scheduler contexts held by
2520  * this port. These contexts are automatically released in the next
2521  * rte_event_dequeue_burst() invocation if the port supports implicit
2522  * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
2523  * operation can be used to release the contexts early.
2524  *
2525  * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
2526  * enqueued to the same port that their associated events were dequeued from.
2527  *
2528  * @param dev_id
2529  *   The identifier of the device.
2530  * @param port_id
2531  *   The identifier of the event port.
2532  * @param[out] ev
2533  *   Points to an array of *nb_events* objects of type *rte_event* structure
2534  *   for output to be populated with the dequeued event objects.
2535  * @param nb_events
2536  *   The maximum number of event objects to dequeue, typically number of
2537  *   rte_event_port_dequeue_depth() available for this port.
2538  *
2539  * @param timeout_ticks
2540  *   - 0 no-wait, returns immediately if there is no event.
2541  *   - >0 wait for the event, if the device is configured with
2542  *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
2543  *   at least one event is available or *timeout_ticks* time has elapsed.
2544  *   If the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
2545  *   then this function will wait until an event is available or
2546  *   *dequeue_timeout_ns* ns, which was previously supplied to
2547  *   rte_event_dev_configure(), has elapsed.
2548  *
2549  * @return
2550  * The number of event objects actually dequeued from the port. The return
2551  * value can be less than the value of the *nb_events* parameter when the
2552  * event port has fewer than *nb_events* events available to dequeue.
2553  *
2554  * @see rte_event_port_dequeue_depth()
2555  */
2556 static inline uint16_t
2557 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
2558 			uint16_t nb_events, uint64_t timeout_ticks)
2559 {
2560 	const struct rte_event_fp_ops *fp_ops;
2561 	void *port;
2562 
2563 	fp_ops = &rte_event_fp_ops[dev_id];
2564 	port = fp_ops->data[port_id];
2565 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2566 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2567 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2568 		rte_errno = EINVAL;
2569 		return 0;
2570 	}
2571 
2572 	if (port == NULL) {
2573 		rte_errno = EINVAL;
2574 		return 0;
2575 	}
2576 #endif
2577 	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
2578 	/*
2579 	 * Allow zero-cost non-burst mode routine invocation if the application
2580 	 * requests nb_events as a constant value of one
2581 	 */
2582 	if (nb_events == 1)
2583 		return (fp_ops->dequeue)(port, ev, timeout_ticks);
2584 	else
2585 		return (fp_ops->dequeue_burst)(port, ev, nb_events,
2586 					       timeout_ticks);
2587 }
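
/*
 * Illustrative sketch (not part of this API) of a worker loop: dequeue a
 * burst, process each event and forward it to the next pipeline stage.
 * *done*, *timeout* and *next_queue* are hypothetical application variables.
 *
 *	struct rte_event evs[32];
 *	uint16_t i, nb;
 *
 *	while (!done) {
 *		nb = rte_event_dequeue_burst(dev_id, port_id, evs, 32, timeout);
 *		for (i = 0; i < nb; i++) {
 *			evs[i].op = RTE_EVENT_OP_FORWARD;
 *			evs[i].queue_id = next_queue;
 *		}
 *		if (nb != 0)
 *			rte_event_enqueue_burst(dev_id, port_id, evs, nb);
 *	}
 */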
2588 
2589 #define RTE_EVENT_DEV_MAINT_OP_FLUSH          (1 << 0)
2590 /**< Force an immediate flush of any buffered events in the port,
2591  * potentially at the cost of additional overhead.
2592  *
2593  * @see rte_event_maintain()
2594  */
2595 
2596 /**
2597  * Maintain an event device.
2598  *
2599  * This function is only relevant for event devices which do not have
2600  * the @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE flag set. Such devices
2601  * require an application thread using a particular port to
2602  * periodically call rte_event_maintain() on that port during periods in
2603  * which it is neither attempting to enqueue events to nor dequeue
2604  * events from the port. rte_event_maintain() is a low-overhead
2605  * function and should be called at a high rate (e.g., in the
2606  * application's poll loop).
2607  *
2608  * No port may be left unmaintained.
2609  *
2610  * At the application thread's convenience, rte_event_maintain() may
2611  * (but is not required to) be called even during periods when enqueue
2612  * or dequeue functions are being called, at the cost of a slight
2613  * increase in overhead.
2614  *
2615  * rte_event_maintain() may be called on event devices which have set
2616  * @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, in which case it is a
2617  * no-operation.
2618  *
2619  * @param dev_id
2620  *   The identifier of the device.
2621  * @param port_id
2622  *   The identifier of the event port.
2623  * @param op
2624  *   0, or @ref RTE_EVENT_DEV_MAINT_OP_FLUSH.
2625  * @return
2626  *  - 0 on success.
2627  *  - -EINVAL if *dev_id*,  *port_id*, or *op* is invalid.
2628  *
2629  * @see RTE_EVENT_DEV_CAP_MAINTENANCE_FREE
2630  */
2631 static inline int
2632 rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
2633 {
2634 	const struct rte_event_fp_ops *fp_ops;
2635 	void *port;
2636 
2637 	fp_ops = &rte_event_fp_ops[dev_id];
2638 	port = fp_ops->data[port_id];
2639 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2640 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2641 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
2642 		return -EINVAL;
2643 
2644 	if (port == NULL)
2645 		return -EINVAL;
2646 
2647 	if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
2648 		return -EINVAL;
2649 #endif
2650 	rte_eventdev_trace_maintain(dev_id, port_id, op);
2651 
2652 	if (fp_ops->maintain != NULL)
2653 		fp_ops->maintain(port, op);
2654 
2655 	return 0;
2656 }
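
/*
 * Illustrative sketch (not part of this API): a producer-only thread on a
 * device without RTE_EVENT_DEV_CAP_MAINTENANCE_FREE calls rte_event_maintain()
 * whenever it has nothing to enqueue. have_work() and produce_and_enqueue()
 * are hypothetical application functions.
 *
 *	while (!done) {
 *		if (have_work())
 *			produce_and_enqueue(dev_id, port_id);
 *		else
 *			rte_event_maintain(dev_id, port_id, 0);
 *	}
 */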
2657 
2658 /**
2659  * Change the active profile on an event port.
2660  *
2661  * This function is used to change the current active profile on an event port
2662  * when multiple link profiles are configured on an event port through the
2663  * function call ``rte_event_port_profile_links_set``.
2664  *
2665  * On the subsequent ``rte_event_dequeue_burst`` call, only the event queues
2666  * that were associated with the newly active profile will participate in
2667  * scheduling.
2668  *
2669  * @param dev_id
2670  *   The identifier of the device.
2671  * @param port_id
2672  *   The identifier of the event port.
2673  * @param profile_id
2674  *   The identifier of the profile.
2675  * @return
2676  *  - 0 on success.
2677  *  - -EINVAL if *dev_id*,  *port_id*, or *profile_id* is invalid.
2678  */
2679 static inline int
2680 rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
2681 {
2682 	const struct rte_event_fp_ops *fp_ops;
2683 	void *port;
2684 
2685 	fp_ops = &rte_event_fp_ops[dev_id];
2686 	port = fp_ops->data[port_id];
2687 
2688 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2689 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2690 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
2691 		return -EINVAL;
2692 
2693 	if (port == NULL)
2694 		return -EINVAL;
2695 
2696 	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
2697 		return -EINVAL;
2698 #endif
2699 	rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);
2700 
2701 	return fp_ops->profile_switch(port, profile_id);
2702 }
2703 
2704 #ifdef __cplusplus
2705 }
2706 #endif
2707 
2708 #endif /* _RTE_EVENTDEV_H_ */
2709