/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc.
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright 2016 NXP
 * All rights reserved.
 */

#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_

/**
 * @file
 *
 * RTE Event Device API
 * ====================
 *
 * In a traditional DPDK application model, the application polls Ethdev port RX
 * queues to look for work, and processing is done in a run-to-completion manner,
 * after which the packets are transmitted on an Ethdev TX queue. Load is
 * distributed by statically assigning ports and queues to lcores, and NIC
 * receive-side scaling (RSS), or similar, is employed to distribute network flows
 * (and thus work) on the same port across multiple RX queues.
 *
 * In contrast, in an event-driven model, as supported by this "eventdev" library,
 * incoming packets (or other input events) are fed into an event device, which
 * schedules those packets across the available lcores, in accordance with its configuration.
 * This event-driven programming model offers applications automatic multicore scaling,
 * dynamic load balancing, pipelining, packet order maintenance, synchronization,
 * and prioritization/quality of service.
 *
 * The Event Device API is composed of two parts:
 *
 * - The application-oriented Event API that includes functions to set up
 *   an event device (configure it, set up its queues and ports, and start it), to
 *   establish the links between queues and ports to receive events, and so on.
 *
 * - The driver-oriented Event API that exports a function allowing
 *   an event Poll Mode Driver (PMD) to register itself as
 *   an event device driver.
 *
 * Application-oriented Event API
 * ------------------------------
 *
 * Event device components:
 *
 *                     +-----------------+
 *                     | +-------------+ |
 *        +-------+    | |    flow 0   | |
 *        |Packet |    | +-------------+ |
 *        |event  |    | +-------------+ |
 *        |       |    | |    flow 1   | |port_link(port0, queue0)
 *        +-------+    | +-------------+ |     |     +--------+
 *        +-------+    | +-------------+ o-----v-----o        |dequeue +------+
 *        |Crypto |    | |    flow n   | |           | event  +------->|Core 0|
 *        |work   |    | +-------------+ o----+      | port 0 |        |      |
 *        |done ev|    |  event queue 0  |    |      +--------+        +------+
 *        +-------+    +-----------------+    |
 *        +-------+                           |
 *        |Timer  |    +-----------------+    |      +--------+
 *        |expiry |    | +-------------+ |    +------o        |dequeue +------+
 *        |event  |    | |    flow 0   | o-----------o event  +------->|Core 1|
 *        +-------+    | +-------------+ |      +----o port 1 |        |      |
 *       Event enqueue | +-------------+ |      |    +--------+        +------+
 *     o-------------> | |    flow 1   | |      |
 *        enqueue(     | +-------------+ |      |
 *        queue_id,    |                 |      |    +--------+        +------+
 *        flow_id,     | +-------------+ |      |    |        |dequeue |Core 2|
 *        sched_type,  | |    flow n   | o-----------o event  +------->|      |
 *        event_type,  | +-------------+ |      |    | port 2 |        +------+
 *        subev_type,  |  event queue 1  |      |    +--------+
 *        event)       +-----------------+      |    +--------+
 *                                              |    |        |dequeue +------+
 *        +-------+    +-----------------+      |    | event  +------->|Core n|
 *        |Core   |    | +-------------+ o-----------o port n |        |      |
 *        |(SW)   |    | |    flow 0   | |      |    +--------+        +--+---+
 *        |event  |    | +-------------+ |      |                         |
 *        +-------+    | +-------------+ |      |                         |
 *            ^        | |    flow 1   | |      |                         |
 *            |        | +-------------+ o------+                         |
 *            |        | +-------------+ |                                |
 *            |        | |    flow n   | |                                |
 *            |        | +-------------+ |                                |
 *            |        |  event queue n  |                                |
 *            |        +-----------------+                                |
 *            |                                                           |
 *            +-----------------------------------------------------------+
 *
 * **Event device**: A hardware or software-based event scheduler.
 *
 * **Event**: Represents an item of work and is the smallest unit of scheduling.
 * An event carries metadata, such as queue ID, scheduling type, and event priority,
 * and data such as one or more packets or other kinds of buffers.
 * Some examples of events are:
 * - a software-generated item of work originating from an lcore,
 *   perhaps carrying a packet to be processed.
 * - a crypto work completion notification.
 * - a timer expiry notification.
 *
 * **Event queue**: A queue containing events that are to be scheduled by the event device.
 * An event queue contains events of different flows associated with scheduling
 * types, such as atomic, ordered, or parallel.
 * Each event given to an event device must have a valid event queue id field in the metadata,
 * to specify on which event queue in the device the event must be placed,
 * for later scheduling.
 *
 * **Event port**: An application's interface into the event dev for enqueue and
 * dequeue operations. Each event port can be linked with one or more
 * event queues for dequeue operations.
 * Enqueue and dequeue from a port is not thread-safe, and the expected use-case is
 * that each port is polled by only a single lcore. [If this is not the case,
 * a suitable synchronization mechanism should be used to prevent simultaneous
 * access from multiple lcores.]
 * To schedule events to an lcore, the event device will schedule them to the event port(s)
 * being polled by that lcore.
 *
 * *NOTE*: By default, all the functions of the Event Device API exported by a PMD
 * are non-thread-safe functions, which must not be invoked on the same object in parallel on
 * different logical cores.
 * For instance, the dequeue function of a PMD cannot be invoked in parallel on two logical
 * cores to operate on the same event port. Of course, this function
 * can be invoked in parallel by different logical cores on different ports.
 * It is the responsibility of the upper level application to enforce this rule.
 *
 * In all functions of the Event API, the Event device is
 * designated by an integer >= 0 named the device identifier *dev_id*.
 *
 * The functions exported by the application Event API to set up a device
 * must be invoked in the following order:
 *     - rte_event_dev_configure()
 *     - rte_event_queue_setup()
 *     - rte_event_port_setup()
 *     - rte_event_port_link()
 *     - rte_event_dev_start()
 *
 * Then, the application can invoke, in any order, the functions
 * exported by the Event API to dequeue events, enqueue events,
 * and link and unlink event queue(s) to event ports.
 *
 * Before configuring a device, an application should call rte_event_dev_info_get()
 * to determine the capabilities of the event device, and any queue or port
 * limits of that device. The parameters set in the various device configuration
 * structures may need to be adjusted based on the max values provided in the
 * device information structure returned from the rte_event_dev_info_get() API.
 * An application may use rte_event_queue_default_conf_get() or
 * rte_event_port_default_conf_get() to get the default configuration
 * to set up an event queue or event port by overriding a few default values.
 *
 * If the application wants to change the configuration (i.e. call
 * rte_event_dev_configure(), rte_event_queue_setup(), or
 * rte_event_port_setup()), it must call rte_event_dev_stop() first to stop the
 * device and then do the reconfiguration before calling rte_event_dev_start()
 * again. The schedule, enqueue and dequeue functions should not be invoked
 * when the device is stopped.
 *
 * Finally, an application can close an Event device by invoking the
 * rte_event_dev_close() function. Once closed, a device cannot be
 * reconfigured or restarted.
 *
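 * A minimal device setup sketch following that order (illustrative only:
 * a single queue and port, with limits taken from the device info; error
 * checks omitted for brevity):
 *
 * \code{.c}
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *	uint8_t queue_id = 0, port_id = 0;
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.nb_event_queues = 1;
 *	cfg.nb_event_ports = 1;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	rte_event_dev_configure(dev_id, &cfg);
 *
 *	rte_event_queue_setup(dev_id, queue_id, NULL); // default queue config
 *	rte_event_port_setup(dev_id, port_id, NULL);   // default port config
 *	rte_event_port_link(dev_id, port_id, &queue_id, NULL, 1);
 *	rte_event_dev_start(dev_id);
 * \endcode
 *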
 * Driver-Oriented Event API
 * -------------------------
 *
 * At the Event driver level, Event devices are represented by a generic
 * data structure of type *rte_event_dev*.
 *
 * Event devices are dynamically registered during the PCI/SoC device probing
 * phase performed at EAL initialization time.
 * When an Event device is being probed, an *rte_event_dev* structure is allocated
 * for it and the event_dev_init() function supplied by the Event driver
 * is invoked to properly initialize the device.
 *
 * The role of the device init function is to reset the device hardware or
 * to initialize the software event driver implementation.
 *
 * If the device init operation is successful, the device is assigned a device
 * id (dev_id) for application use.
 * Otherwise, the *rte_event_dev* structure is freed.
 *
 * Each function of the application Event API invokes a specific function
 * of the PMD that controls the target device designated by its device
 * identifier.
 *
 * For this purpose, all device-specific functions of an Event driver are
 * supplied through a set of pointers contained in a generic structure of type
 * *event_dev_ops*.
 * The address of the *event_dev_ops* structure is stored in the *rte_event_dev*
 * structure by the device init function of the Event driver, which is
 * invoked during the PCI/SoC device probing phase, as explained earlier.
 *
 * In other words, each function of the Event API simply retrieves the
 * *rte_event_dev* structure associated with the device identifier and
 * performs an indirect invocation of the corresponding driver function
 * supplied in the *event_dev_ops* structure of the *rte_event_dev* structure.
 *
 * For performance reasons, the addresses of the fast-path functions of the
 * event driver are not contained in the *event_dev_ops* structure.
 * Instead, they are directly stored at the beginning of the *rte_event_dev*
 * structure to avoid an extra indirect memory access during their invocation.
 *
 * Event Enqueue, Dequeue and Scheduling
 * -------------------------------------
 *
 * RTE event device drivers do not use interrupts for enqueue or dequeue
 * operations. Instead, Event drivers export Poll-Mode enqueue and dequeue
 * functions to applications.
 *
 * Events are injected into the event device through the *enqueue* operation by
 * event producers in the system. The typical event producers are the ethdev
 * subsystem for generating packet events, the CPU (SW) for generating events based
 * on different stages of application processing, and the cryptodev for generating
 * crypto work completion notifications, etc.
 *
 * The *dequeue* operation gets one or more events from the event ports.
 * The application processes the events and sends them to a downstream event queue through
 * rte_event_enqueue_burst(), if it is an intermediate stage of event processing.
 * On the final stage of processing, the application may use the Tx adapter API for maintaining
 * the event ingress order while sending the packet/event on the wire via NIC Tx.
 *
 * The point at which events are scheduled to ports depends on the device.
 * For hardware devices, scheduling occurs asynchronously without any software
 * intervention. Software schedulers can either be distributed
 * (each worker thread schedules events to its own port) or centralized
 * (a dedicated thread schedules to all ports). Distributed software schedulers
 * perform the scheduling inside the enqueue or dequeue functions, whereas centralized
 * software schedulers need a dedicated service core for scheduling.
 * The absence of the RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag
 * indicates that the device is centralized and thus needs a dedicated scheduling
 * thread (generally an RTE service that should be mapped to one or more service cores)
 * that repeatedly calls the software specific scheduling function.
 *
 * An event driven worker thread has the following typical workflow on the fastpath:
 * \code{.c}
 *	while (1) {
 *		rte_event_dequeue_burst(...);
 *		(event processing)
 *		rte_event_enqueue_burst(...);
 *	}
 * \endcode
 */

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "rte_eventdev_trace_fp.h"

struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
struct rte_event;

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS           (1ULL << 0)
/**< Event scheduling prioritization is based on the priority and weight
 * associated with each event queue.
 *
 * Events from the queue with the highest priority
 * are scheduled first. If the queues are of the same priority, the weight of the
 * queues is considered to select a queue in a weighted round robin fashion.
 * Subsequent dequeue calls from an event port could see events from the same
 * event queue, if the queue is configured with an affinity count. The affinity
 * count is the number of subsequent dequeue calls in which an event port
 * should use the same event queue, if the queue is non-empty.
 *
 * NOTE: A device may use both queue prioritization and event prioritization
 * (@ref RTE_EVENT_DEV_CAP_EVENT_QOS capability) when making packet scheduling decisions.
 *
 *  @see rte_event_queue_setup()
 *  @see rte_event_queue_attr_set()
 */
#define RTE_EVENT_DEV_CAP_EVENT_QOS           (1ULL << 1)
/**< Event scheduling prioritization is based on the priority associated with
 *  each event.
 *
 *  Priority of each event is supplied in the *rte_event* structure
 *  on each enqueue operation.
 *  If this capability is not set, the priority field of the event structure
 *  is ignored for each event.
 *
 * NOTE: A device may use both queue prioritization (@ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability)
 * and event prioritization when making packet scheduling decisions.
 *
 *  @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED   (1ULL << 2)
/**< Event device operates in distributed scheduling mode.
 *
 * In distributed scheduling mode, event scheduling happens in HW, in
 * rte_event_dequeue_burst() / rte_event_enqueue_burst(), or in a combination of the two.
 * If the flag is not set, then the eventdev is centralized and thus needs a
 * dedicated service core that acts as a scheduling thread.
 *
 * @see rte_event_dev_service_id_get()
 */
#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES     (1ULL << 3)
/**< Event device is capable of accepting enqueued events, of any type
 * advertised as supported by the device, to all destination queues.
 *
 * When this capability is set, and @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag is set
 * in @ref rte_event_queue_conf.event_queue_cfg, the "schedule_type" field of the
 * @ref rte_event_queue_conf structure is ignored when a queue is being configured.
 * Instead the "sched_type" field of each event enqueued is used to
 * select the scheduling to be performed on that event.
 *
 * If this capability is not set, or the configuration flag is not set,
 * the queue only supports events of the *RTE_SCHED_TYPE_* type specified
 * in the @ref rte_event_queue_conf structure at the time of configuration.
 * The behaviour when events of other scheduling types are sent to the queue is
 * undefined.
 *
 * @see RTE_EVENT_QUEUE_CFG_ALL_TYPES
 * @see RTE_SCHED_TYPE_ATOMIC
 * @see RTE_SCHED_TYPE_ORDERED
 * @see RTE_SCHED_TYPE_PARALLEL
 * @see rte_event_queue_conf.event_queue_cfg
 * @see rte_event_queue_conf.schedule_type
 * @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_DEV_CAP_BURST_MODE          (1ULL << 4)
/**< Event device is capable of operating in burst mode for enqueue (forward,
 * release) and dequeue operations.
 *
 * If this capability is not set, the application
 * can still use rte_event_dequeue_burst() and rte_event_enqueue_burst(), but
 * the PMD accepts or returns only one event at a time.
 *
 * @see rte_event_dequeue_burst()
 * @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE    (1ULL << 5)
/**< Event device ports support disabling the implicit release feature, in
 * which the port will release all unreleased events in its dequeue operation.
 *
 * If this capability is set and the port is configured with implicit release
 * disabled, the application is responsible for explicitly releasing events
 * using either the @ref RTE_EVENT_OP_FORWARD or the @ref RTE_EVENT_OP_RELEASE event
 * enqueue operations.
 *
 * @see rte_event_dequeue_burst()
 * @see rte_event_enqueue_burst()
 */

#define RTE_EVENT_DEV_CAP_NONSEQ_MODE         (1ULL << 6)
/**< Event device is capable of operating in non-sequential mode.
 *
 * The path of an event need not be sequential. The application can change
 * the path of an event at runtime, and events may be sent to queues in any order.
 *
 * If the flag is not set, then each event will follow a path from queue 0
 * to queue 1 to queue 2, etc.
 * The eventdev will return an error when the application enqueues an event for a
 * qid which is not the next in the sequence.
 */

#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK   (1ULL << 7)
/**< Event device is capable of reconfiguring the queue/port link at runtime.
 *
 * If the flag is not set, the eventdev queue/port link can only be
 * configured during initialization, or by stopping the device and
 * then restarting it after reconfiguration.
 *
 * @see rte_event_port_link()
 * @see rte_event_port_unlink()
 */

364 
365 #define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
366 /**< Event device is capable of setting up links between multiple queues and a single port.
367  *
368  * If the flag is not set, each port may only be linked to a single queue, and
369  * so can only receive events from that queue.
370  * However, each queue may be linked to multiple ports.
371  *
372  * @see rte_event_port_link()
373  */
374 
375 #define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
376 /**< Event device preserves the flow ID from the enqueued event to the dequeued event.
377  *
378  * If this flag is not set,
379  * the content of the flow-id field in dequeued events is implementation dependent.
380  *
381  * @see rte_event_dequeue_burst()
382  */
383 
384 #define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)
385 /**< Event device *does not* require calls to rte_event_maintain().
386  *
387  * An event device that does not set this flag requires calls to
388  * rte_event_maintain() during periods when neither
389  * rte_event_dequeue_burst() nor rte_event_enqueue_burst() are called
390  * on a port. This will allow the event device to perform internal
391  * processing, such as flushing buffered events, return credits to a
392  * global pool, or process signaling related to load balancing.
393  *
394  * @see rte_event_maintain()
395  */
396 
#define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11)
/**< Event device is capable of changing the queue attributes at runtime, i.e.
 * after the rte_event_queue_setup() or rte_event_dev_start() call sequence.
 *
 * If this flag is not set, event queue attributes can only be configured during
 * rte_event_queue_setup().
 *
 * @see rte_event_queue_setup()
 */

#define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12)
/**< Event device is capable of supporting multiple link profiles per event port.
 *
 * When set, the value of `rte_event_dev_info::max_profiles_per_port` is greater
 * than one, and multiple profiles may be configured and then switched at runtime.
 * If not set, only a single profile may be configured, which may itself be
 * runtime adjustable (if @ref RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK is set).
 *
 * @see rte_event_port_profile_links_set()
 * @see rte_event_port_profile_links_get()
 * @see rte_event_port_profile_switch()
 * @see RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK
 */

#define RTE_EVENT_DEV_CAP_ATOMIC  (1ULL << 13)
/**< Event device is capable of atomic scheduling.
 * When this flag is set, the application can configure queues with scheduling type
 * atomic on this event device.
 *
 * @see RTE_SCHED_TYPE_ATOMIC
 */

#define RTE_EVENT_DEV_CAP_ORDERED  (1ULL << 14)
/**< Event device is capable of ordered scheduling.
 * When this flag is set, the application can configure queues with scheduling type
 * ordered on this event device.
 *
 * @see RTE_SCHED_TYPE_ORDERED
 */

#define RTE_EVENT_DEV_CAP_PARALLEL  (1ULL << 15)
/**< Event device is capable of parallel scheduling.
 * When this flag is set, the application can configure queues with scheduling type
 * parallel on this event device.
 *
 * @see RTE_SCHED_TYPE_PARALLEL
 */

#define RTE_EVENT_DEV_CAP_INDEPENDENT_ENQ  (1ULL << 16)
/**< Event device is capable of independent enqueue.
 * This capability indicates that the eventdev supports enqueue in any order, or
 * specifically in a different order than the dequeue order. The eventdev PMD can either
 * dequeue events in the changed order in which they are enqueued, or restore the
 * original order before sending them to the underlying hardware device. A flag is
 * provided during port configuration to inform the eventdev PMD that the application
 * intends to use an independent enqueue order on a particular port. Note that this
 * capability only matters for eventdevs supporting burst mode.
 *
 * When implicit release is enabled on a port, the eventdev PMD will also handle
 * the insertion of RELEASE events in place of dropped events. The independent enqueue
 * feature only applies to FORWARD and RELEASE events. New events (op=RTE_EVENT_OP_NEW)
 * will be dequeued in the order the application enqueues them and do not maintain
 * any order relative to FORWARD/RELEASE events. FORWARD vs NEW relaxed ordering
 * only applies to ports that have enabled the independent enqueue feature.
 */

#define RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE (1ULL << 17)
/**< Event device supports event pre-scheduling.
 *
 * When this capability is available, the application can enable event pre-scheduling on the event
 * device to pre-schedule events to an event port when `rte_event_dequeue_burst()`
 * is issued.
 * The pre-schedule process starts with the `rte_event_dequeue_burst()` call, and the
 * pre-scheduled events are returned on the next `rte_event_dequeue_burst()` call.
 *
 * @see rte_event_dev_configure()
 */

#define RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE (1ULL << 18)
/**< Event device supports adaptive event pre-scheduling.
 *
 * When this capability is available, the application can enable adaptive pre-scheduling
 * on the event device, where events are pre-scheduled when there are no forward
 * progress constraints with the currently held flow contexts.
 * The pre-schedule process starts with the `rte_event_dequeue_burst()` call, and the
 * pre-scheduled events are returned on the next `rte_event_dequeue_burst()` call.
 *
 * @see rte_event_dev_configure()
 */

#define RTE_EVENT_DEV_CAP_PER_PORT_PRESCHEDULE (1ULL << 19)
/**< Event device supports event pre-scheduling per event port.
 *
 * When this flag is set, the event device allows controlling the event
 * pre-scheduling at event port granularity.
 *
 * @see rte_event_dev_configure()
 * @see rte_event_port_preschedule_modify()
 */

/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST   0
/**< Highest priority level for events and queues.
 *
 * @see rte_event_queue_setup()
 * @see rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */
#define RTE_EVENT_DEV_PRIORITY_NORMAL    128
/**< Normal priority level for events and queues.
 *
 * @see rte_event_queue_setup()
 * @see rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */
#define RTE_EVENT_DEV_PRIORITY_LOWEST    255
/**< Lowest priority level for events and queues.
 *
 * @see rte_event_queue_setup()
 * @see rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */

/* Event queue scheduling weights */
#define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255
/**< Highest weight of an event queue.
 *
 * @see rte_event_queue_attr_get()
 * @see rte_event_queue_attr_set()
 */
#define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0
/**< Lowest weight of an event queue.
 *
 * @see rte_event_queue_attr_get()
 * @see rte_event_queue_attr_set()
 */

/* Event queue scheduling affinity */
#define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255
/**< Highest scheduling affinity of an event queue.
 *
 * @see rte_event_queue_attr_get()
 * @see rte_event_queue_attr_set()
 */
#define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0
/**< Lowest scheduling affinity of an event queue.
 *
 * @see rte_event_queue_attr_get()
 * @see rte_event_queue_attr_set()
 */

548 
549 /**
550  * Get the total number of event devices.
551  *
552  * @return
553  *   The total number of usable event devices.
554  */
555 uint8_t
556 rte_event_dev_count(void);
557 
558 /**
559  * Get the device identifier for the named event device.
560  *
561  * @param name
562  *   Event device name to select the event device identifier.
563  *
564  * @return
565  *   Event device identifier (dev_id >= 0) on success.
566  *   Negative error code on failure:
567  *   - -EINVAL - input name parameter is invalid.
568  *   - -ENODEV - no event device found with that name.
569  */
570 int
571 rte_event_dev_get_dev_id(const char *name);
572 
573 /**
574  * Return the NUMA socket to which a device is connected.
575  *
576  * @param dev_id
577  *   The identifier of the device.
578  * @return
579  *   The NUMA socket id to which the device is connected or
580  *   a default of zero if the socket could not be determined.
581  *   -EINVAL on error, where the given dev_id value does not
582  *   correspond to any event device.
583  */
584 int
585 rte_event_dev_socket_id(uint8_t dev_id);

/**
 * Event device information
 */
struct rte_event_dev_info {
	const char *driver_name;	/**< Event driver name. */
	struct rte_device *dev;	/**< Device information. */
	uint32_t min_dequeue_timeout_ns;
	/**< Minimum global dequeue timeout(ns) supported by this device. */
	uint32_t max_dequeue_timeout_ns;
	/**< Maximum global dequeue timeout(ns) supported by this device. */
	uint32_t dequeue_timeout_ns;
	/**< Configured global dequeue timeout(ns) for this device. */
	uint8_t max_event_queues;
	/**< Maximum event queues supported by this device.
	 *
	 * This count excludes any queues covered by @ref max_single_link_event_port_queue_pairs.
	 */
	uint32_t max_event_queue_flows;
	/**< Maximum number of flows within an event queue supported by this device. */
	uint8_t max_event_queue_priority_levels;
	/**< Maximum number of event queue priority levels supported by this device.
	 *
	 * Valid when the device has @ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
	 *
	 * The implementation shall normalize priority values specified between
	 * @ref RTE_EVENT_DEV_PRIORITY_HIGHEST and @ref RTE_EVENT_DEV_PRIORITY_LOWEST
	 * to map them internally to this range of priorities.
	 * [For devices supporting a power-of-2 number of priority levels, this
	 * normalization will be done via a right-shift operation, so only the top
	 * log2(max_levels) bits will be used by the event device.]
	 *
	 * @see rte_event_queue_conf.priority
	 */
	uint8_t max_event_priority_levels;
	/**< Maximum number of event priority levels supported by this device.
	 *
	 * Valid when the device has @ref RTE_EVENT_DEV_CAP_EVENT_QOS capability.
	 *
	 * The implementation shall normalize priority values specified between
	 * @ref RTE_EVENT_DEV_PRIORITY_HIGHEST and @ref RTE_EVENT_DEV_PRIORITY_LOWEST
	 * to map them internally to this range of priorities.
	 * [For devices supporting a power-of-2 number of priority levels, this
	 * normalization will be done via a right-shift operation, so only the top
	 * log2(max_levels) bits will be used by the event device.]
	 *
	 * @see rte_event.priority
	 */
	uint8_t max_event_ports;
	/**< Maximum number of event ports supported by this device.
	 *
	 * This count excludes any ports covered by @ref max_single_link_event_port_queue_pairs.
	 */
	uint8_t max_event_port_dequeue_depth;
	/**< Maximum number of events that can be dequeued at a time from an event port
	 * on this device.
	 *
	 * A device that does not support burst dequeue
	 * (@ref RTE_EVENT_DEV_CAP_BURST_MODE) will set this to 1.
	 */
	uint32_t max_event_port_enqueue_depth;
	/**< Maximum number of events that can be enqueued at a time to an event port
	 * on this device.
	 *
	 * A device that does not support burst enqueue
	 * (@ref RTE_EVENT_DEV_CAP_BURST_MODE) will set this to 1.
	 */
	uint8_t max_event_port_links;
	/**< Maximum number of queues that can be linked to a single event port on this device.
	 */
	int32_t max_num_events;
	/**< A *closed system* event dev has a limit on the number of events it
	 * can manage at a time.
	 * Once the number of events tracked by an eventdev exceeds this number,
	 * any enqueues of NEW events will fail.
	 * An *open system* event dev does not have a limit and will specify this as -1.
	 */
	uint32_t event_dev_cap;
	/**< Event device capabilities flags (RTE_EVENT_DEV_CAP_*). */
	uint8_t max_single_link_event_port_queue_pairs;
	/**< Maximum number of event ports and queues, supported by this device,
	 * that are optimized for (and only capable of) single-link configurations.
	 * These ports and queues are not accounted for in @ref max_event_ports
	 * or @ref max_event_queues.
	 */
	uint8_t max_profiles_per_port;
	/**< Maximum number of event queue link profiles per event port.
	 * A device that doesn't support multiple profiles will set this as 1.
	 */
};


/**
 * Retrieve details of an event device's capabilities and configuration limits.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param[out] dev_info
 *   A pointer to a structure of type *rte_event_dev_info* to be filled with the
 *   information about the device's capabilities.
 *
 * @return
 *   - 0: Success, information about the event device is present in dev_info.
 *   - <0: Failure, error code returned by the function.
 *     - -EINVAL - invalid input parameters, e.g. incorrect device id.
 *     - -ENOTSUP - device does not support returning capabilities information.
 */
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
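
/*
 * Example (illustrative): query the device's limits and capability flags
 * before building a configuration. "nb_workers" here is an application-defined
 * value, not part of this API.
 *
 * \code{.c}
 * struct rte_event_dev_info info;
 *
 * if (rte_event_dev_info_get(dev_id, &info) < 0)
 *	return -1; // invalid device id
 * if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE))
 *	printf("PMD accepts/returns only one event per call\n");
 * if (nb_workers > info.max_event_ports)
 *	nb_workers = info.max_event_ports; // clamp to the device limit
 * \endcode
 */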

/**
 * The count of ports.
 */
#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
/**
 * The count of queues.
 */
#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
/**
 * The status of the device, zero for stopped, non-zero for started.
 */
#define RTE_EVENT_DEV_ATTR_STARTED 2

/**
 * Get an attribute from a device.
 *
 * @param dev_id Eventdev id
 * @param attr_id The attribute ID to retrieve
 * @param[out] attr_value A pointer that will be filled in with the attribute
 *             value if successful.
 *
 * @return
 *   - 0: Successfully retrieved attribute value
 *   - -EINVAL: Invalid device or *attr_id* provided, or *attr_value* is NULL
 */
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value);
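
/*
 * Example (illustrative): use the attribute API to check at runtime whether
 * the device has been started.
 *
 * \code{.c}
 * uint32_t started;
 *
 * if (rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_STARTED, &started) == 0 &&
 *		started != 0)
 *	printf("event device %u is running\n", dev_id);
 * \endcode
 */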

/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
/**< Override the global *dequeue_timeout_ns* and use per dequeue timeout in ns.
 *  @see rte_event_dequeue_timeout_ticks(), rte_event_dequeue_burst()
 */

/** Event device pre-schedule type enumeration. */
enum rte_event_dev_preschedule_type {
	RTE_EVENT_PRESCHEDULE_NONE,
	/**< Disable pre-schedule across the event device or on a given event port.
	 * @ref rte_event_dev_config.preschedule_type
	 * @ref rte_event_port_preschedule_modify()
	 */
	RTE_EVENT_PRESCHEDULE,
	/**< Enable pre-schedule always across the event device or a given event port.
	 * @ref rte_event_dev_config.preschedule_type
	 * @ref rte_event_port_preschedule_modify()
	 * @see RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE
	 * @see RTE_EVENT_DEV_CAP_PER_PORT_PRESCHEDULE
	 */
	RTE_EVENT_PRESCHEDULE_ADAPTIVE,
	/**< Enable adaptive pre-schedule across the event device or a given event port.
	 * Delay issuing pre-schedule until there are no forward progress constraints with
	 * the held flow contexts.
	 * @ref rte_event_dev_config.preschedule_type
	 * @ref rte_event_port_preschedule_modify()
	 * @see RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE
	 * @see RTE_EVENT_DEV_CAP_PER_PORT_PRESCHEDULE
	 */
};

/** Event device configuration structure */
struct rte_event_dev_config {
	uint32_t dequeue_timeout_ns;
	/**< rte_event_dequeue_burst() timeout on this device.
	 * This value should be in the range of @ref rte_event_dev_info.min_dequeue_timeout_ns and
	 * @ref rte_event_dev_info.max_dequeue_timeout_ns returned by
	 * @ref rte_event_dev_info_get().
	 * The value 0 is allowed, in which case the default dequeue timeout is used.
	 * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
	 */
	int32_t nb_events_limit;
	/**< In a *closed system* this field is the limit on the maximum number of
	 * events that can be inflight in the eventdev at a given time. The
	 * limit is required to ensure that the finite space in a closed system
	 * is not exhausted.
	 * The value cannot exceed @ref rte_event_dev_info.max_num_events
	 * returned by rte_event_dev_info_get().
	 *
	 * This value should be set to -1 for *open systems*, that is,
	 * those systems returning -1 in @ref rte_event_dev_info.max_num_events.
	 *
	 * @see rte_event_port_conf.new_event_threshold
	 */
	uint8_t nb_event_queues;
	/**< Number of event queues to configure on this device.
	 * This value *includes* any single-link queue-port pairs to be used.
	 * This value cannot exceed @ref rte_event_dev_info.max_event_queues +
	 * @ref rte_event_dev_info.max_single_link_event_port_queue_pairs
	 * returned by rte_event_dev_info_get().
	 * The number of non-single-link queues, i.e. this value less
	 * *nb_single_link_event_port_queues* in this struct, cannot exceed
	 * @ref rte_event_dev_info.max_event_queues.
	 */
	uint8_t nb_event_ports;
	/**< Number of event ports to configure on this device.
	 * This value *includes* any single-link queue-port pairs to be used.
	 * This value cannot exceed @ref rte_event_dev_info.max_event_ports +
	 * @ref rte_event_dev_info.max_single_link_event_port_queue_pairs
	 * returned by rte_event_dev_info_get().
	 * The number of non-single-link ports, i.e. this value less
	 * *nb_single_link_event_port_queues* in this struct, cannot exceed
	 * @ref rte_event_dev_info.max_event_ports.
	 */
	uint32_t nb_event_queue_flows;
	/**< Max number of flows needed for a single event queue on this device.
	 * This value cannot exceed @ref rte_event_dev_info.max_event_queue_flows
	 * returned by rte_event_dev_info_get().
	 */
	uint32_t nb_event_port_dequeue_depth;
	/**< Max number of events that can be dequeued at a time from an event port on this device.
	 * This value cannot exceed @ref rte_event_dev_info.max_event_port_dequeue_depth
	 * returned by rte_event_dev_info_get().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 * @see rte_event_port_setup() rte_event_dequeue_burst()
	 */
	uint32_t nb_event_port_enqueue_depth;
	/**< Max number of events that can be enqueued at a time to an event port on this device.
	 * This value cannot exceed @ref rte_event_dev_info.max_event_port_enqueue_depth
	 * returned by rte_event_dev_info_get().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 * @see rte_event_port_setup() rte_event_enqueue_burst()
	 */
	uint32_t event_dev_cfg;
	/**< Event device config flags (RTE_EVENT_DEV_CFG_*). */
	uint8_t nb_single_link_event_port_queues;
	/**< Number of event ports and queues that will be singly-linked to
	 * each other. These are a subset of the overall event ports and
	 * queues; this value cannot exceed *nb_event_ports* or
	 * *nb_event_queues*. If the device has ports and queues that are
	 * optimized for single-link usage, this field is a hint for how many
	 * to allocate; otherwise, regular event ports and queues will be used.
	 */
	enum rte_event_dev_preschedule_type preschedule_type;
	/**< Event pre-schedule type to use across the event device, if supported.
	 * @see RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE
	 * @see RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE
	 */
};

/**
 * Configure an event device.
 *
 * This function must be invoked before any other configuration function in the
 * API, when preparing an event device for application use.
 * This function can also be re-invoked when a device is in the stopped state.
 *
 * The caller should use rte_event_dev_info_get() to get the capabilities and
 * resource limits for this event device before calling this API.
 * Many values in the dev_conf input parameter are subject to limits given
 * in the device information returned from rte_event_dev_info_get().
 *
 * @param dev_id
 *   The identifier of the device to configure.
 * @param dev_conf
 *   The event device configuration structure.
 *
 * @return
 *   - 0: Success, device configured.
 *   - <0: Error code returned by the driver configuration function.
 *     - -ENOTSUP - device does not support configuration.
 *     - -EINVAL  - invalid input parameter.
 *     - -EBUSY   - device has already been started.
 */
int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf);
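
/*
 * Example (illustrative): reconfiguring a device that is already running.
 * The device must be stopped first; "cfg" is assumed to hold the currently
 * active configuration.
 *
 * \code{.c}
 * rte_event_dev_stop(dev_id);
 * cfg.nb_event_ports++; // grow the port count
 * rte_event_dev_configure(dev_id, &cfg);
 * // ... redo queue/port setup and linking as needed ...
 * rte_event_dev_start(dev_id);
 * \endcode
 */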

/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES          (1ULL << 0)
/**< Allow events with schedule types ATOMIC, ORDERED, and PARALLEL to be enqueued to this queue.
 *
 * The scheduling type to be used is that specified in each individual event.
 * This flag can only be set when configuring queues on devices reporting the
 * @ref RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES capability.
 *
 * Without this flag, only events with the specific scheduling type configured at queue setup
 * can be sent to the queue.
 *
 * @see RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES
 * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
 * @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK        (1ULL << 1)
/**< This event queue links only to a single event port.
 *
 * No load-balancing of events is performed, as all events
 * sent to this queue end up at the same event port.
 * The number of queues on which this flag is to be set must be
 * configured at device configuration time, by setting the
 * @ref rte_event_dev_config.nb_single_link_event_port_queues
 * parameter appropriately.
 *
 * This flag serves as a hint only; any devices without specific
 * support for single-link queues can fall back automatically to
 * using regular queues with a single destination port.
 *
 *  @see rte_event_dev_info.max_single_link_event_port_queue_pairs
 *  @see rte_event_dev_config.nb_single_link_event_port_queues
 *  @see rte_event_port_setup(), rte_event_port_link()
 */

/** Event queue configuration structure */
struct rte_event_queue_conf {
	uint32_t nb_atomic_flows;
	/**< The maximum number of active flows this queue can track at any
	 * given time.
	 *
	 * If the queue is configured for atomic scheduling (by
	 * applying the @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to
	 * @ref rte_event_queue_conf.event_queue_cfg
	 * or the @ref RTE_SCHED_TYPE_ATOMIC flag to @ref rte_event_queue_conf.schedule_type), then the
	 * value must be in the range of [1, @ref rte_event_dev_config.nb_event_queue_flows],
	 * which was previously provided in rte_event_dev_configure().
	 *
	 * If the queue is not configured for atomic scheduling this value is ignored.
	 */
	uint32_t nb_atomic_order_sequences;
	/**< The maximum number of outstanding events waiting to be
	 * reordered by this queue. In other words, the number of entries in
	 * this queue's reorder buffer. When the number of events in the
	 * reorder buffer reaches *nb_atomic_order_sequences*, the
	 * scheduler cannot schedule the events from this queue and no
	 * events will be returned from dequeue until one or more entries are
	 * freed up/released.
	 *
	 * If the queue is configured for ordered scheduling (by applying the
	 * @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to @ref rte_event_queue_conf.event_queue_cfg or
	 * the @ref RTE_SCHED_TYPE_ORDERED flag to @ref rte_event_queue_conf.schedule_type),
	 * then the value must be in the range of
	 * [1, @ref rte_event_dev_config.nb_event_queue_flows], which was
	 * previously supplied to rte_event_dev_configure().
	 *
	 * If the queue is not configured for ordered scheduling, then this value is ignored.
	 */
	uint32_t event_queue_cfg;
	/**< Queue cfg flags (EVENT_QUEUE_CFG_*). */
	uint8_t schedule_type;
	/**< Queue schedule type (RTE_SCHED_TYPE_*).
	 *
	 * Valid when the @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag is not set in
	 * @ref rte_event_queue_conf.event_queue_cfg.
	 *
	 * If the @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag is set, then this field is ignored.
	 *
	 * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
	 */
	uint8_t priority;
	/**< Priority for this event queue relative to other event queues.
	 *
	 * The requested priority should be in the range of
	 * [@ref RTE_EVENT_DEV_PRIORITY_HIGHEST, @ref RTE_EVENT_DEV_PRIORITY_LOWEST].
	 * The implementation shall normalize the requested priority to an
	 * event device supported priority value.
	 *
	 * Valid when the device has @ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability;
	 * ignored otherwise.
	 */
	uint8_t weight;
	/**< Weight of the event queue relative to other event queues.
	 *
	 * The requested weight should be in the range of
	 * [@ref RTE_EVENT_QUEUE_WEIGHT_HIGHEST, @ref RTE_EVENT_QUEUE_WEIGHT_LOWEST].
	 * The implementation shall normalize the requested weight to an event
	 * device supported weight value.
	 *
	 * Valid when the device has @ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability;
	 * ignored otherwise.
	 */
	uint8_t affinity;
	/**< Affinity of the event queue relative to other event queues.
	 *
	 * The requested affinity should be in the range of
	 * [@ref RTE_EVENT_QUEUE_AFFINITY_HIGHEST, @ref RTE_EVENT_QUEUE_AFFINITY_LOWEST].
	 * The implementation shall normalize the requested affinity to an event
	 * device supported affinity value.
	 *
	 * Valid when the device has @ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability;
	 * ignored otherwise.
	 */
};

/**
 * Retrieve the default configuration information of an event queue designated
 * by its *queue_id* from the event driver for an event device.
 *
 * This function is intended to be used in conjunction with rte_event_queue_setup(),
 * where the caller needs to set up the queue by overriding a few default values.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the event queue to get the configuration information.
 *   The value must be less than @ref rte_event_dev_config.nb_event_queues
 *   previously supplied to rte_event_dev_configure().
 * @param[out] queue_conf
 *   The pointer to the default event queue configuration data.
 * @return
 *   - 0: Success, driver updates the default event queue configuration data.
 *   - <0: Error code returned by the driver info get function.
 *
 * @see rte_event_queue_setup()
 */
int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf);

/**
 * Allocate and set up an event queue for an event device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the event queue to setup. The value must be
 *   less than @ref rte_event_dev_config.nb_event_queues previously supplied to
 *   rte_event_dev_configure().
 * @param queue_conf
 *   The pointer to the configuration data to be used for the event queue.
 *   NULL value is allowed, in which case the default configuration is used.
 *
 * @see rte_event_queue_default_conf_get()
 *
 * @return
 *   - 0: Success, event queue correctly set up.
 *   - <0: event queue configuration failed.
 */
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf);
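
/*
 * Example (illustrative): set up a queue by overriding a few of the
 * driver-supplied defaults, as suggested above.
 *
 * \code{.c}
 * struct rte_event_queue_conf conf;
 *
 * rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
 * conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 * conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
 * rte_event_queue_setup(dev_id, queue_id, &conf);
 * \endcode
 */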

/**
 * Queue attribute id for the priority of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
/**
 * Queue attribute id for the number of atomic flows configured for the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
/**
 * Queue attribute id for the number of atomic order sequences configured for the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
/**
 * Queue attribute id for the configuration flags for the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
/**
 * Queue attribute id for the schedule type of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
/**
 * Queue attribute id for the weight of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_WEIGHT 5
/**
 * Queue attribute id for the affinity of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_AFFINITY 6

/**
 * Get an attribute of an event queue.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the event queue to query. The value must be less than
 *   @ref rte_event_dev_config.nb_event_queues previously supplied to rte_event_dev_configure().
 * @param attr_id
 *   The attribute ID to retrieve (RTE_EVENT_QUEUE_ATTR_*).
 * @param[out] attr_value
 *   A pointer that will be filled in with the attribute value if successful.
 *
 * @return
 *   - 0: Successfully returned value
 *   - -EINVAL: invalid device, queue or attr_id provided, or attr_value was NULL.
 *   - -EOVERFLOW: returned when attr_id is set to
 *   @ref RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES is
 *   set in the queue configuration flags.
 */
int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value);

/**
 * Set an event queue attribute.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the event queue to configure. The value must be less than
 *   @ref rte_event_dev_config.nb_event_queues previously supplied to rte_event_dev_configure().
 * @param attr_id
 *   The attribute ID to set (RTE_EVENT_QUEUE_ATTR_*).
 * @param attr_value
 *   The attribute value to set.
 *
 * @return
 *   - 0: Successfully set attribute.
 *   - <0: failed to set event queue attribute.
 *     - -EINVAL: invalid device, queue or attr_id.
 *     - -ENOTSUP: device does not support setting the event attribute.
 */
int
rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			 uint64_t attr_value);
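
/*
 * Example (illustrative): raise a queue's scheduling weight at runtime.
 * Valid only on devices reporting @ref RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR;
 * "info" is assumed to come from rte_event_dev_info_get().
 *
 * \code{.c}
 * if (info.event_dev_cap & RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR)
 *	rte_event_queue_attr_set(dev_id, queue_id, RTE_EVENT_QUEUE_ATTR_WEIGHT,
 *				 RTE_EVENT_QUEUE_WEIGHT_HIGHEST);
 * \endcode
 */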

/* Event port specific APIs */

/* Event port configuration bitmap flags */
#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)
/**< Configure the port not to release outstanding events in
 * rte_event_dequeue_burst(). If set, all events received through
 * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
 * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
 * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
 */
#define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)
/**< This event port links only to a single event queue.
 * The queue it links with should be similarly configured with the
 * @ref RTE_EVENT_QUEUE_CFG_SINGLE_LINK flag.
 *
 *  @see RTE_EVENT_QUEUE_CFG_SINGLE_LINK
 *  @see rte_event_port_setup(), rte_event_port_link()
 */
#define RTE_EVENT_PORT_CFG_HINT_PRODUCER       (1ULL << 2)
/**< Hint that this event port will primarily enqueue events to the system.
 * A PMD can optimize its internal workings by assuming that this port is
 * primarily going to enqueue NEW events.
 *
 * Note that this flag is only a hint, so PMDs must operate under the
 * assumption that any port can enqueue an event with any type of op.
 *
 *  @see rte_event_port_setup()
 */
#define RTE_EVENT_PORT_CFG_HINT_CONSUMER       (1ULL << 3)
/**< Hint that this event port will primarily dequeue events from the system.
 * A PMD can optimize its internal workings by assuming that this port is
 * primarily going to consume events, and not enqueue NEW or FORWARD
 * events.
 *
 * Note that this flag is only a hint, so PMDs must operate under the
 * assumption that any port can enqueue an event with any type of op.
 *
 *  @see rte_event_port_setup()
 */
#define RTE_EVENT_PORT_CFG_HINT_WORKER         (1ULL << 4)
/**< Hint that this event port will primarily pass existing events through.
 * A PMD can optimize its internal workings by assuming that this port is
 * primarily going to FORWARD events, and not enqueue NEW or RELEASE events
 * often.
 *
 * Note that this flag is only a hint, so PMDs must operate under the
 * assumption that any port can enqueue an event with any type of op.
 *
 *  @see rte_event_port_setup()
 */
#define RTE_EVENT_PORT_CFG_INDEPENDENT_ENQ   (1ULL << 5)
/**< Flag to enable independent enqueue. Must not be set if the device
 * is not RTE_EVENT_DEV_CAP_INDEPENDENT_ENQ capable. This feature
 * allows an application to enqueue RTE_EVENT_OP_FORWARD or
 * RTE_EVENT_OP_RELEASE in an order different than the order the
 * events were dequeued from the event device, while maintaining
 * RTE_SCHED_TYPE_ATOMIC or RTE_SCHED_TYPE_ORDERED semantics.
 *
 * Note that this flag only matters for Eventdevs supporting burst mode.
 *
 *  @see rte_event_port_setup()
 */

/** Event port configuration structure */
struct rte_event_port_conf {
	int32_t new_event_threshold;
	/**< A backpressure threshold for new event enqueues on this port.
	 * Use for *closed system* event dev where event capacity is limited,
	 * and cannot exceed the capacity of the event dev.
	 *
	 * Configuring ports with different thresholds can make higher priority
	 * traffic less likely to be backpressured.
	 * For example, a port used to inject NIC Rx packets into the event dev
	 * can have a lower threshold so as not to overwhelm the device,
	 * while ports used for worker pools can have a higher threshold.
	 * This value cannot exceed the @ref rte_event_dev_config.nb_events_limit value
	 * which was previously supplied to rte_event_dev_configure().
	 *
	 * This should be set to '-1' for *open systems*, i.e. when
	 * @ref rte_event_dev_info.max_num_events == -1.
	 */
	uint16_t dequeue_depth;
	/**< Configure the maximum size of burst dequeues for this event port.
	 * This value cannot exceed the @ref rte_event_dev_config.nb_event_port_dequeue_depth value
	 * which was previously supplied to rte_event_dev_configure().
	 *
	 * Ignored when the device does not support the @ref RTE_EVENT_DEV_CAP_BURST_MODE capability.
	 */
	uint16_t enqueue_depth;
	/**< Configure the maximum size of burst enqueues to this event port.
	 * This value cannot exceed the @ref rte_event_dev_config.nb_event_port_enqueue_depth value
	 * which was previously supplied to rte_event_dev_configure().
	 *
	 * Ignored when the device does not support the @ref RTE_EVENT_DEV_CAP_BURST_MODE capability.
	 */
	uint32_t event_port_cfg; /**< Port configuration flags (EVENT_PORT_CFG_*). */
};

/**
 * Retrieve the default configuration information of an event port designated
 * by its *port_id* from the event driver for an event device.
 *
 * This function is intended to be used in conjunction with rte_event_port_setup(),
 * where the caller can set up the port by overriding just a few default values.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The index of the event port to get the configuration information.
 *   The value must be less than @ref rte_event_dev_config.nb_event_ports
 *   previously supplied to rte_event_dev_configure().
 * @param[out] port_conf
 *   The pointer to a structure to store the default event port configuration data.
 * @return
 *   - 0: Success, driver updates the default event port configuration data.
 *   - <0: Error code returned by the driver info get function.
 *      - -EINVAL - invalid input parameter.
 *      - -ENOTSUP - function is not supported for this device.
 *
 * @see rte_event_port_setup()
 */
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				struct rte_event_port_conf *port_conf);

1229 /**
1230  * Allocate and set up an event port for an event device.
1231  *
1232  * @param dev_id
1233  *   The identifier of the device.
1234  * @param port_id
1235  *   The index of the event port to setup. The value must be less than
1236  *   @ref rte_event_dev_config.nb_event_ports previously supplied to
1237  *   rte_event_dev_configure().
1238  * @param port_conf
1239  *   The pointer to the configuration data to be used for the port.
1240  *   NULL value is allowed, in which case the default configuration is used.
1241  *
1242  * @see rte_event_port_default_conf_get()
1243  *
1244  * @return
1245  *   - 0: Success, event port correctly set up.
1246  *   - <0: Port configuration failed.
1247  *     - -EINVAL - Invalid input parameter.
1248  *     - -EBUSY - Port already started.
1249  *     - -ENOTSUP - Function not supported on this device, or a NULL pointer passed
1250  *        as the port_conf parameter, and no default configuration function available
1251  *        for this device.
1252  *     - -EDQUOT - Application tried to link a queue configured
1253  *      with @ref RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port.
1254  */
1255 int
1256 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
1257 		     const struct rte_event_port_conf *port_conf);
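
/*
 * Illustrative sketch (not part of the API): fetch the default port
 * configuration, override a single field, and set the port up. The dev_id
 * and port_id variables and the chosen threshold are assumptions for this
 * example only.
 *
 * @code
 *	struct rte_event_port_conf conf;
 *
 *	if (rte_event_port_default_conf_get(dev_id, port_id, &conf) < 0)
 *		return -1;
 *	conf.new_event_threshold = 1024; // assumed to be <= nb_events_limit
 *	if (rte_event_port_setup(dev_id, port_id, &conf) < 0)
 *		return -1;
 * @endcode
 */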
1258 
1259 typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
1260 					  struct rte_event event, void *arg);
1261 /**< Callback function prototype that can be passed to
1262  * rte_event_port_quiesce(), invoked once per released event.
1263  */
1264 
1265 /**
1266  * Quiesce any core specific resources consumed by the event port.
1267  *
1268  * Event ports are generally coupled with lcores, and a given hardware
1269  * implementation might require the PMD to store port-specific data in the
1270  * lcore.
1271  * When the application decides to migrate the event port to another lcore
1272  * or tear down the current lcore, it can call `rte_event_port_quiesce`
1273  * to make sure that all the data associated with the event port is released
1274  * from the lcore; this might also include any prefetched events.
1275  * While releasing the event port from the lcore, this function calls the
1276  * user-provided flush callback once per event.
1277  *
1278  * @note Invocation of this API does not affect the existing port configuration.
1279  *
1280  * @param dev_id
1281  *   The identifier of the device.
1282  * @param port_id
1283  *   The index of the event port to quiesce. The value must be less than
1284  *   @ref rte_event_dev_config.nb_event_ports previously supplied to rte_event_dev_configure().
1285  * @param release_cb
1286  *   Callback function invoked once per flushed event.
1287  * @param args
1288  *   Argument supplied to callback.
1289  */
1290 void
1291 rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
1292 		       rte_eventdev_port_flush_t release_cb, void *args);
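
/*
 * Illustrative sketch: flushing a port before migrating it to another
 * lcore. The callback below assumes every released event carries an mbuf,
 * which is application specific.
 *
 * @code
 *	static void
 *	port_flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
 *	{
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(arg);
 *		rte_pktmbuf_free(ev.mbuf);
 *	}
 *
 *	rte_event_port_quiesce(dev_id, port_id, port_flush_cb, NULL);
 * @endcode
 */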
1293 
1294 /**
1295  * Port attribute id for the maximum size of a burst enqueue operation supported on a port.
1296  */
1297 #define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
1298 /**
1299  * Port attribute id for the maximum size of a dequeue burst which can be returned from a port.
1300  */
1301 #define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
1302 /**
1303  * Port attribute id for the new event threshold of the port.
1304  * Once the number of events in the system exceeds this threshold, the enqueue of NEW-type
1305  * events will fail.
1306  */
1307 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
1308 /**
1309  * Port attribute id for the implicit release disable attribute of the port.
1310  */
1311 #define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
1312 
1313 /**
1314  * Get an attribute from a port.
1315  *
1316  * @param dev_id
1317  *   The identifier of the device.
1318  * @param port_id
1319  *   The index of the event port to query. The value must be less than
1320  *   @ref rte_event_dev_config.nb_event_ports previously supplied to rte_event_dev_configure().
1321  * @param attr_id
1322  *   The attribute ID to retrieve (RTE_EVENT_PORT_ATTR_*)
1323  * @param[out] attr_value
1324  *   A pointer that will be filled in with the attribute value if successful
1325  *
1326  * @return
1327  *   - 0: Successfully returned value.
1328  *   - (-EINVAL) Invalid device, port or attr_id, or attr_value was NULL.
1329  */
1330 int
1331 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
1332 			uint32_t *attr_value);
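
/*
 * Illustrative sketch: querying a port's maximum enqueue burst size at
 * runtime, e.g. to size application batches. dev_id and port_id are
 * assumptions for this example only.
 *
 * @code
 *	uint32_t enq_depth = 0;
 *
 *	if (rte_event_port_attr_get(dev_id, port_id,
 *			RTE_EVENT_PORT_ATTR_ENQ_DEPTH, &enq_depth) < 0)
 *		return -1;
 * @endcode
 */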
1333 
1334 /**
1335  * Start an event device.
1336  *
1337  * The device start step is the last one in device setup, and enables the event
1338  * ports and queues to start accepting events and scheduling them to event ports.
1339  *
1340  * On success, all basic functions exported by the API (event enqueue,
1341  * event dequeue and so on) can be invoked.
1342  *
1343  * @param dev_id
1344  *   Event device identifier.
1345  * @return
1346  *   - 0: Success, device started.
1347  *   - -EINVAL:  Invalid device id provided.
1348  *   - -ENOTSUP: Device does not support this operation.
1349  *   - -ESTALE: Not all ports of the device are configured.
1350  *   - -ENOLINK: Not all queues are linked, which could lead to deadlock.
1351  */
1352 int
1353 rte_event_dev_start(uint8_t dev_id);
1354 
1355 /**
1356  * Stop an event device.
1357  *
1358  * This function causes all queued events to be drained, including those
1359  * residing in event ports. While draining events out of the device, this
1360  * function calls the user-provided flush callback (if one was registered) once
1361  * per event.
1362  *
1363  * The device can be restarted with a call to rte_event_dev_start(). Threads
1364  * that continue to enqueue/dequeue while the device is stopped, or being
1365  * stopped, will result in undefined behavior. This includes event adapters,
1366  * which must be stopped prior to stopping the eventdev.
1367  *
1368  * @param dev_id
1369  *   Event device identifier.
1370  *
1371  * @see rte_event_dev_stop_flush_callback_register()
1372  */
1373 void
1374 rte_event_dev_stop(uint8_t dev_id);
1375 
1376 typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
1377 					  struct rte_event event, void *arg);
1378 /**< Callback function called during rte_event_dev_stop(), invoked once per
1379  * flushed event.
1380  */
1381 
1382 /**
1383  * Registers a callback function to be invoked during rte_event_dev_stop() for
1384  * each flushed event. This function can be used to properly dispose of queued
1385  * events, for example events containing memory pointers.
1386  *
1387  * The callback function is only registered for the calling process. The
1388  * callback function must be registered in every process that can call
1389  * rte_event_dev_stop().
1390  *
1391  * Only one callback function may be registered. Each new call replaces
1392  * the existing registered callback function with the new function passed in.
1393  *
1394  * To unregister a callback, call this function with a NULL callback pointer.
1395  *
1396  * @param dev_id
1397  *   The identifier of the device.
1398  * @param callback
1399  *   Callback function to be invoked once per flushed event.
1400  *   Pass NULL to unset any previously-registered callback function.
1401  * @param userdata
1402  *   Argument supplied to callback.
1403  *
1404  * @return
1405  *  - 0 on success.
1406  *  - -EINVAL if *dev_id* is invalid.
1407  *
1408  * @see rte_event_dev_stop()
1409  */
1410 int rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1411 					       rte_eventdev_stop_flush_t callback, void *userdata);
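
/*
 * Illustrative sketch: freeing the mbufs of events still queued in the
 * device when it is stopped. Assumes every flushed event carries an mbuf,
 * which is application specific.
 *
 * @code
 *	static void
 *	stop_flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
 *	{
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(arg);
 *		rte_pktmbuf_free(ev.mbuf);
 *	}
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, stop_flush_cb, NULL);
 * @endcode
 */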
1412 
1413 /**
1414  * Close an event device. The device cannot be restarted!
1415  *
1416  * @param dev_id
1417  *   Event device identifier.
1418  *
1419  * @return
1420  *  - 0 on successfully closing device
1421  *  - <0 on failure to close device.
1422  *    - -EINVAL - invalid device id.
1423  *    - -ENOTSUP - operation not supported for this device.
1424  *    - -EAGAIN - device is busy.
1425  */
1426 int
1427 rte_event_dev_close(uint8_t dev_id);
1428 
1429 /**
1430  * Event vector structure.
1431  */
1432 struct __rte_aligned(16) rte_event_vector {
1433 	uint16_t nb_elem;
1434 	/**< Number of elements valid in this event vector. */
1435 	uint16_t elem_offset : 12;
1436 	/**< Offset into the vector array where valid elements start from. */
1437 	uint16_t rsvd : 3;
1438 	/**< Reserved for future use */
1439 	uint16_t attr_valid : 1;
1440 	/**< Indicates that the below union attributes have valid information.
1441 	 */
1442 	union {
1443 		/* Used by Rx/Tx adapter.
1444 		 * Indicates that all the elements in this vector belong to the
1445 		 * same port and queue pair when originating from Rx adapter,
1446 		 * valid only when event type is ETHDEV_VECTOR or
1447 		 * ETH_RX_ADAPTER_VECTOR.
1448 		 * Can also be used to indicate to the Tx adapter the destination
1449 		 * port and queue of the mbufs in the vector.
1450 		 */
1451 		struct {
1452 			uint16_t port;   /**< Ethernet device port id. */
1453 			uint16_t queue;  /**< Ethernet device queue id. */
1454 		};
1455 	};
1456 	/**< Union to hold common attributes of the vector array. */
1457 	uint64_t impl_opaque;
1458 	/**< Implementation specific opaque value.
1459 	 * An implementation may use this field to hold an implementation
1460 	 * specific value to share between the dequeue and enqueue operations.
1461 	 * The application should not modify this field.
1462 	 */
1463 
1464 /* empty structures do not have zero size in C++, leading to compilation errors
1465  * with clang about the structure having different sizes in C and C++.
1466  * Since these are all zero-sized arrays, we can omit the "union" wrapper for
1467  * C++ builds, removing the warning.
1468  */
1469 #ifndef __cplusplus
1470 	union __rte_aligned(16) {
1471 #endif
1472 		struct rte_mbuf *mbufs[0];
1473 		void *ptrs[0];
1474 		uint64_t u64s[0];
1475 #ifndef __cplusplus
1476 	};
1477 #endif
1478 	/**< Start of the vector array union. Depending upon the event type the
1479 	 * vector array can be an array of mbufs or pointers or opaque u64
1480 	 * values.
1481 	 */
1482 };
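
/*
 * Illustrative sketch: walking the mbufs carried by a dequeued event
 * vector. Valid elements start at elem_offset and there are nb_elem of
 * them; process_pkt() stands in for application code and is hypothetical.
 *
 * @code
 *	struct rte_event_vector *vec = ev.vec;
 *	uint16_t i;
 *
 *	for (i = vec->elem_offset; i < vec->elem_offset + vec->nb_elem; i++)
 *		process_pkt(vec->mbufs[i]);
 * @endcode
 */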
1483 
1484 /* Scheduler type definitions */
1485 #define RTE_SCHED_TYPE_ORDERED          0
1486 /**< Ordered scheduling
1487  *
1488  * Events from an ordered flow of an event queue can be scheduled to multiple
1489  * ports for concurrent processing while maintaining the original event order,
1490  * i.e. the order in which they were first enqueued to that queue.
1491  * This scheme allows events pertaining to the same, potentially large, flow to
1492  * be processed in parallel on multiple cores without incurring any
1493  * application-level order restoration logic overhead.
1494  *
1495  * After events are dequeued from a set of ports, as those events are re-enqueued
1496  * to another queue (with the op field set to @ref RTE_EVENT_OP_FORWARD), the event
1497  * device restores the original event order - including events returned from all
1498  * ports in the set - before the events are placed on the destination queue,
1499  * for subsequent scheduling to ports.
1500  *
1501  * Any events not forwarded, i.e. dropped explicitly via RELEASE or implicitly
1502  * released by the next dequeue operation on a port, are skipped by the reordering
1503  * stage and do not affect the reordering of other returned events.
1504  *
1505  * Any NEW events sent on a port are not ordered with respect to FORWARD events sent
1506  * on the same port, since they have no original event order. They also are not
1507  * ordered with respect to NEW events enqueued on other ports.
1508  * However, NEW events to the same destination queue from the same port are guaranteed
1509  * to be enqueued in the order they were submitted via rte_event_enqueue_burst().
1510  *
1511  * NOTE:
1512  *   In restoring event order of forwarded events, the eventdev API guarantees that
1513  *   all events from the same flow (i.e. same @ref rte_event.flow_id,
1514  *   @ref rte_event.priority and @ref rte_event.queue_id) will be put in the original
1515  *   order before being forwarded to the destination queue.
1516  *   Some eventdevs may implement stricter ordering to achieve this aim,
1517  *   for example, restoring the order across *all* flows dequeued from the same ORDERED
1518  *   queue.
1519  *
1520  * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
1521  */
1522 
1523 #define RTE_SCHED_TYPE_ATOMIC           1
1524 /**< Atomic scheduling
1525  *
1526  * Events from an atomic flow, identified by a combination of @ref rte_event.flow_id,
1527  * @ref rte_event.queue_id and @ref rte_event.priority, can be scheduled only to a
1528  * single port at a time. The port is guaranteed to have exclusive (atomic)
1529  * access to the associated flow context, which enables the user to avoid SW
1530  * synchronization. Atomic flows also maintain event ordering
1531  * since only one port at a time can process events from each flow of an
1532  * event queue, and events within a flow are not reordered within the scheduler.
1533  *
1534  * An atomic flow is locked to a port when events from that flow are first
1535  * scheduled to that port. That lock remains in place until the
1536  * application calls rte_event_dequeue_burst() from the same port,
1537  * which implicitly releases the lock (if @ref RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL flag is not set).
1538  * The application may allow the scheduler to release the lock earlier than that by invoking
1539  * rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE operation for each event from that flow.
1540  *
1541  * NOTE: Where multiple events from the same queue and atomic flow are scheduled to a port,
1542  * the lock for that flow is only released once the last event from the flow is released,
1543  * or forwarded to another queue. So long as there is at least one event from an atomic
1544  * flow scheduled to a port/core (including any events in the port's dequeue queue, not yet read
1545  * by the application), that port will hold the synchronization lock for that flow.
1546  *
1547  * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
1548  */
1549 
1550 #define RTE_SCHED_TYPE_PARALLEL         2
1551 /**< Parallel scheduling
1552  *
1553  * The scheduler performs priority scheduling, load balancing, etc. functions
1554  * but does not provide additional event synchronization or ordering.
1555  * It is free to schedule events from a single parallel flow of an event queue
1556  * to multiple event ports for concurrent processing.
1557  * The application is responsible for flow context synchronization and
1558  * event ordering (SW synchronization).
1559  *
1560  * @see rte_event_queue_setup(), rte_event_dequeue_burst()
1561  */
1562 
1563 /* Event types to classify the event source */
1564 #define RTE_EVENT_TYPE_ETHDEV           0x0
1565 /**< The event generated from ethdev subsystem */
1566 #define RTE_EVENT_TYPE_CRYPTODEV        0x1
1567 /**< The event generated from cryptodev subsystem */
1568 #define RTE_EVENT_TYPE_TIMER		0x2
1569 /**< The event generated from event timer adapter */
1570 #define RTE_EVENT_TYPE_CPU              0x3
1571 /**< The event generated from cpu for pipelining.
1572  * Application may use *sub_event_type* to further classify the event.
1573  */
1574 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER   0x4
1575 /**< The event generated from event eth Rx adapter */
1576 #define RTE_EVENT_TYPE_DMADEV           0x5
1577 /**< The event generated from dma subsystem */
1578 #define RTE_EVENT_TYPE_VECTOR           0x8
1579 /**< Indicates that event is a vector.
1580  * All vector event types should be a logical OR of RTE_EVENT_TYPE_VECTOR
1581  * and one of the other event types. This simplifies the pipeline design,
1582  * as processing can be split between vector events and normal events across event types.
1583  * Example:
1584  *	if (ev.event_type & RTE_EVENT_TYPE_VECTOR) {
1585  *		// Classify and handle vector event.
1586  *	} else {
1587  *		// Classify and handle event.
1588  *	}
1589  */
1590 #define RTE_EVENT_TYPE_ETHDEV_VECTOR                                           \
1591 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
1592 /**< The event vector generated from ethdev subsystem */
1593 #define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
1594 /**< The event vector generated from cpu for pipelining. */
1595 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR                                   \
1596 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
1597 /**< The event vector generated from eth Rx adapter. */
1598 #define RTE_EVENT_TYPE_CRYPTODEV_VECTOR                                        \
1599 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
1600 /**< The event vector generated from cryptodev adapter. */
1601 
1602 #define RTE_EVENT_TYPE_MAX              0x10
1603 /**< Maximum number of event types */
1604 
1605 /* Event enqueue operations */
1606 #define RTE_EVENT_OP_NEW                0
1607 /**< The @ref rte_event.op field must be set to this operation type to inject a new event,
1608  * i.e. one not previously dequeued, into the event device, to be scheduled
1609  * for processing.
1610  */
1611 #define RTE_EVENT_OP_FORWARD            1
1612 /**< The application must set the @ref rte_event.op field to this operation type to return a
1613  * previously dequeued event to the event device to be scheduled for further processing.
1614  *
1615  * This event *must* be enqueued to the same port that the
1616  * event to be forwarded was dequeued from.
1617  *
1618  * The event's fields, including (but not limited to) flow_id, scheduling type,
1619  * destination queue, and event payload e.g. mbuf pointer, may all be updated as
1620  * desired by the application, but the @ref rte_event.impl_opaque field must
1621  * be kept to the same value as was present when the event was dequeued.
1622  */
1623 #define RTE_EVENT_OP_RELEASE            2
1624 /**< Release the flow context associated with the schedule type.
1625  *
1626  * If the current flow's scheduling type is @ref RTE_SCHED_TYPE_ATOMIC
1627  * then this operation type hints the scheduler that the user has completed critical
1628  * section processing for this event in the current atomic context, and that the
1629  * scheduler may unlock any atomic locks held for this event.
1630  * If this is the last event from an atomic flow, i.e. all flow locks are released
1631  * (see @ref RTE_SCHED_TYPE_ATOMIC for details), the scheduler is now allowed to
1632  * schedule events from that flow to another port.
1633  * However, the atomic locks may still be held until the next rte_event_dequeue_burst()
1634  * call; enqueuing an event with op type @ref RTE_EVENT_OP_RELEASE is a hint only,
1635  * allowing the scheduler to release the atomic locks early, but not requiring it to do so.
1636  *
1637  * Early atomic lock release may increase parallelism and thus system
1638  * performance, but the user needs to design carefully the split into critical
1639  * vs non-critical sections.
1640  *
1641  * If the current flow's scheduling type is @ref RTE_SCHED_TYPE_ORDERED
1642  * then this operation type informs the scheduler that the current event has
1643  * completed processing and will not be returned to the scheduler, i.e.
1644  * it has been dropped, and so the reordering context for that event
1645  * should be considered filled.
1646  *
1647  * Events with this operation type must only be enqueued to the same port that the
1648  * event to be released was dequeued from. The @ref rte_event.impl_opaque
1649  * field in the release event must have the same value as that in the original dequeued event.
1650  *
1651  * If a dequeued event is re-enqueued with operation type of @ref RTE_EVENT_OP_RELEASE,
1652  * then any subsequent enqueue of that event - or a copy of it - must be done as an event of type
1653  * @ref RTE_EVENT_OP_NEW, not @ref RTE_EVENT_OP_FORWARD. This is because any context for
1654  * the originally dequeued event, i.e. atomic locks, or reorder buffer entries, will have
1655  * been removed or invalidated by the release operation.
1656  */
1657 
1658 /**
1659  * The generic *rte_event* structure to hold the event attributes
1660  * for dequeue and enqueue operation
1661  */
1662 struct rte_event {
1663 	/* WORD0 */
1664 	union {
1665 		uint64_t event;
1666 		/** Event attributes for dequeue or enqueue operation */
1667 		struct {
1668 			uint32_t flow_id:20;
1669 			/**< Target flow identifier for the enqueue and dequeue operation.
1670 			 *
1671 			 * For @ref RTE_SCHED_TYPE_ATOMIC, this field is used to identify a
1672 			 * flow for atomicity within a queue & priority level, such that events
1673 			 * from each individual flow will only be scheduled to one port at a time.
1674 			 *
1675 			 * This field is preserved between enqueue and dequeue when
1676 			 * a device reports the @ref RTE_EVENT_DEV_CAP_CARRY_FLOW_ID
1677 			 * capability. Otherwise the value is implementation dependent
1678 			 * on dequeue.
1679 			 */
1680 			uint32_t sub_event_type:8;
1681 			/**< Sub-event types based on the event source.
1682 			 *
1683 			 * This field is preserved between enqueue and dequeue.
1684 			 *
1685 			 * @see RTE_EVENT_TYPE_CPU
1686 			 */
1687 			uint32_t event_type:4;
1688 			/**< Event type to classify the event source. (RTE_EVENT_TYPE_*)
1689 			 *
1690 			 * This field is preserved between enqueue and dequeue
1691 			 */
1692 			uint8_t op:2;
1693 			 * The type of event enqueue operation - new/forward/release, etc.
1694 			 *
1695 			 * This field is *not* preserved between enqueue and dequeue;
1696 			 * its value is implementation dependent on dequeue.
1697 			 *
1698 			 * @see RTE_EVENT_OP_NEW
1699 			 * @see RTE_EVENT_OP_FORWARD
1700 			 * @see RTE_EVENT_OP_RELEASE
1701 			 */
1702 			uint8_t rsvd:4;
1703 			/**< Reserved for future use.
1704 			 *
1705 			 * Should be set to zero when initializing event structures.
1706 			 *
1707 			 * When forwarding or releasing existing events dequeued from the scheduler,
1708 			 * this field can be ignored.
1709 			 */
1710 			uint8_t sched_type:2;
1711 			/**< Scheduler synchronization type (RTE_SCHED_TYPE_*)
1712 			 * associated with flow id on a given event queue
1713 			 * for the enqueue and dequeue operation.
1714 			 *
1715 			 * This field is used to determine the scheduling type
1716 			 * for events sent to queues where @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES
1717 			 * is configured.
1718 			 * For queues where only a single scheduling type is available,
1719 			 * this field must be set to match the configured scheduling type.
1720 			 *
1721 			 * This field is preserved between enqueue and dequeue.
1722 			 *
1723 			 * @see RTE_SCHED_TYPE_ORDERED
1724 			 * @see RTE_SCHED_TYPE_ATOMIC
1725 			 * @see RTE_SCHED_TYPE_PARALLEL
1726 			 */
1727 			uint8_t queue_id;
1728 			/**< Targeted event queue identifier for the enqueue or
1729 			 * dequeue operation.
1730 			 * The value must be less than @ref rte_event_dev_config.nb_event_queues
1731 			 * which was previously supplied to rte_event_dev_configure().
1732 			 *
1733 			 * This field is preserved between enqueue and dequeue.
1734 			 */
1735 			uint8_t priority;
1736 			/**< Event priority relative to other events in the
1737 			 * event queue. The requested priority should be in the
1738 			 * range of [@ref RTE_EVENT_DEV_PRIORITY_HIGHEST,
1739 			 * @ref RTE_EVENT_DEV_PRIORITY_LOWEST].
1740 			 *
1741 			 * The implementation shall normalize the requested
1742 			 * priority to a supported priority value.
1743 			 * [For devices where the number of supported priority levels is a power-of-2,
1744 			 * the normalization will be done via bit-shifting, so only the highest
1745 			 * log2(num_priorities) bits will be used by the event device.]
1746 			 *
1747 			 * Valid when the device has @ref RTE_EVENT_DEV_CAP_EVENT_QOS capability
1748 			 * and this field is preserved between enqueue and dequeue,
1749 			 * though with possible loss of precision due to normalization and
1750 			 * subsequent de-normalization. (For example, if a device only supports 8
1751 			 * priority levels, only the high 3 bits of this field will be
1752 			 * used by that device, and hence only the value of those 3 bits are
1753 			 * guaranteed to be preserved between enqueue and dequeue.)
1754 			 *
1755 			 * Ignored when device does not support @ref RTE_EVENT_DEV_CAP_EVENT_QOS
1756 			 * capability, and it is implementation dependent if this field is preserved
1757 			 * between enqueue and dequeue.
1758 			 */
1759 			uint8_t impl_opaque;
1760 			/**< Opaque field for event device use.
1761 			 *
1762 			 * An event driver implementation may use this field to hold an
1763 			 * implementation specific value to share between
1764 			 * dequeue and enqueue operation.
1765 			 *
1766 			 * The application must not modify this field.
1767 			 * Its value is implementation dependent on dequeue,
1768 			 * and must be returned unmodified on enqueue when
1769 			 * op type is @ref RTE_EVENT_OP_FORWARD or @ref RTE_EVENT_OP_RELEASE.
1770 			 * This field is ignored on events with op type
1771 			 * @ref RTE_EVENT_OP_NEW.
1772 			 */
1773 		};
1774 	};
1775 	/* WORD1 */
1776 	union {
1777 		uint64_t u64;
1778 		/**< Opaque 64-bit value */
1779 		void *event_ptr;
1780 		/**< Opaque event pointer */
1781 		struct rte_mbuf *mbuf;
1782 		/**< mbuf pointer if dequeued event is associated with mbuf */
1783 		struct rte_event_vector *vec;
1784 		/**< Event vector pointer. */
1785 	};
1786 };
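
/*
 * Illustrative sketch: a NEW event wrapping an mbuf, with the fields an
 * application typically sets before enqueue. The queue id, the flow
 * variable and the mbuf pointer `m` are assumptions for this example only.
 *
 * @code
 *	struct rte_event ev = {
 *		.op = RTE_EVENT_OP_NEW,
 *		.queue_id = 0,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.event_type = RTE_EVENT_TYPE_CPU,
 *		.flow_id = flow,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		.mbuf = m,
 *	};
 * @endcode
 */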
1787 
1788 /* Ethdev Rx adapter capability bitmap flags */
1789 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT	0x1
1790 /**< This flag is set when the packet transfer mechanism is in HW.
1791  * Ethdev can send packets to the event device using an internal event port.
1792  */
1793 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ	0x2
1794 /**< Adapter supports multiple event queues per ethdev. Every ethdev
1795  * Rx queue can be connected to a unique event queue.
1796  */
1797 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
1798 /**< The application can override the adapter generated flow ID in the
1799  * event. This flow ID can be specified when adding an ethdev Rx queue
1800  * to the adapter using the ev.flow_id member.
1801  * @see struct rte_event_eth_rx_adapter_queue_conf::ev
1802  * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
1803  */
1804 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR	0x8
1805 /**< Adapter supports event vectorization per ethdev. */
1806 
1807 /**
1808  * Retrieve the event device's ethdev Rx adapter capabilities for the
1809  * specified ethernet port
1810  *
1811  * @param dev_id
1812  *   The identifier of the device.
1813  *
1814  * @param eth_port_id
1815  *   The identifier of the ethernet device.
1816  *
1817  * @param[out] caps
1818  *   A pointer to memory filled with Rx event adapter capabilities.
1819  *
1820  * @return
1821  *   - 0: Success, driver provides Rx event adapter capabilities for the
1822  *	ethernet device.
1823  *   - <0: Error code returned by the driver function.
1824  */
1825 int
1826 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1827 				uint32_t *caps);
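
/*
 * Illustrative sketch: deciding whether the Rx adapter needs a service
 * core for this ethdev. The use_service_core flag is a hypothetical
 * application variable.
 *
 * @code
 *	uint32_t caps = 0;
 *
 *	if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps) < 0)
 *		return -1;
 *	use_service_core =
 *		!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
 * @endcode
 */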
1828 
1829 #define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
1830 /**< This flag is set when the timer mechanism is in HW. */
1831 
1832 #define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC      (1ULL << 1)
1833 /**< This flag is set if periodic mode is supported. */
1834 
1835 /**
1836  * Retrieve the event device's timer adapter capabilities.
1837  *
1838  * @param dev_id
1839  *   The identifier of the device.
1840  *
1841  * @param[out] caps
1842  *   A pointer to memory to be filled with event timer adapter capabilities.
1843  *
1844  * @return
1845  *   - 0: Success, driver provided event timer adapter capabilities.
1846  *   - <0: Error code returned by the driver function.
1847  */
1848 int
1849 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
1850 
1851 /* Crypto adapter capability bitmap flag */
1852 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW   0x1
1853 /**< Flag indicates HW is capable of generating events in
1854  * RTE_EVENT_OP_NEW enqueue operation. Cryptodev will send
1855  * packets to the event device as new events using an internal
1856  * event port.
1857  */
1858 
1859 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD   0x2
1860 /**< Flag indicates HW is capable of generating events in
1861  * RTE_EVENT_OP_FORWARD enqueue operation. Cryptodev will send
1862  * packets to the event device as forwarded event using an
1863  * internal event port.
1864  */
1865 
1866 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND  0x4
1867 /**< Flag indicates HW is capable of mapping crypto queue pair to
1868  * event queue.
1869  */
1870 
1871 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA   0x8
1872 /**< Flag indicates HW/SW supports a mechanism to store and retrieve
1873  * the private data information along with the crypto session.
1874  */
1875 
1876 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR   0x10
1877 /**< Flag indicates HW is capable of aggregating processed
1878  * crypto operations into rte_event_vector.
1879  */
1880 
1881 /**
1882  * Retrieve the event device's crypto adapter capabilities for the
1883  * specified cryptodev device
1884  *
1885  * @param dev_id
1886  *   The identifier of the device.
1887  *
1888  * @param cdev_id
1889  *   The identifier of the cryptodev device.
1890  *
1891  * @param[out] caps
1892  *   A pointer to memory filled with event adapter capabilities.
1893  *   It is expected to be pre-allocated & initialized by caller.
1894  *
1895  * @return
1896  *   - 0: Success, driver provides event adapter capabilities for the
1897  *     cryptodev device.
1898  *   - <0: Error code returned by the driver function.
1899  */
1900 int
1901 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
1902 				  uint32_t *caps);
1903 
1904 /* DMA adapter capability bitmap flag */
1905 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
1906 /**< Flag indicates HW is capable of generating events in
1907  * RTE_EVENT_OP_NEW enqueue operation. DMADEV will send
1908  * packets to the event device as new events using an
1909  * internal event port.
1910  */
1911 
1912 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
1913 /**< Flag indicates HW is capable of generating events in
1914  * RTE_EVENT_OP_FORWARD enqueue operation. DMADEV will send
1915  * packets to the event device as forwarded event using an
1916  * internal event port.
1917  */
1918 
1919 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4
1920 /**< Flag indicates HW is capable of mapping DMA vchan to event queue. */
1921 
1922 /**
1923  * Retrieve the event device's DMA adapter capabilities for the
1924  * specified dmadev device
1925  *
1926  * @param dev_id
1927  *   The identifier of the device.
1928  *
1929  * @param dmadev_id
1930  *   The identifier of the dmadev device.
1931  *
1932  * @param[out] caps
1933  *   A pointer to memory filled with event adapter capabilities.
1934  *   It is expected to be pre-allocated & initialized by caller.
1935  *
1936  * @return
1937  *   - 0: Success, driver provides event adapter capabilities for the
1938  *     dmadev device.
1939  *   - <0: Error code returned by the driver function.
1940  *
1941  */
1942 __rte_experimental
1943 int
1944 rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);
1945 
1946 /* Ethdev Tx adapter capability bitmap flags */
1947 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT	0x1
1948 /**< This flag is set when the PMD supports a packet transmit callback.
1949  */
1950 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR	0x2
1951 /**< Indicates that the Tx adapter is capable of handling event vector of
1952  * mbufs.
1953  */
1954 
1955 /**
1956  * Retrieve the event device's eth Tx adapter capabilities
1957  *
1958  * @param dev_id
1959  *   The identifier of the device.
1960  *
1961  * @param eth_port_id
1962  *   The identifier of the ethernet device.
1963  *
1964  * @param[out] caps
1965  *   A pointer to memory filled with eth Tx adapter capabilities.
1966  *
1967  * @return
1968  *   - 0: Success, driver provides eth Tx adapter capabilities.
1969  *   - <0: Error code returned by the driver function.
1970  */
1971 int
1972 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1973 				uint32_t *caps);
1974 
1975 /**
1976  * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
1977  *
1978  * If the device is configured with the RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT flag,
1979  * then the application can use this function to convert a timeout value in
1980  * nanoseconds to the implementation-specific timeout value supplied in
1981  * rte_event_dequeue_burst().
1982  *
1983  * @param dev_id
1984  *   The identifier of the device.
1985  * @param ns
1986  *   Wait time in nanoseconds.
1987  * @param[out] timeout_ticks
1988  *   Value for the *timeout_ticks* parameter in rte_event_dequeue_burst()
1989  *
1990  * @return
1991  *  - 0 on success.
1992  *  - -ENOTSUP if the device doesn't support timeouts
1993  *  - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
1994  *  - other values < 0 on failure.
1995  *
1996  * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
1997  * @see rte_event_dev_configure()
1998  */
1999 int
2000 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
2001 					uint64_t *timeout_ticks);
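
/*
 * Illustrative sketch: converting a 100 us wait into device ticks for the
 * *timeout_ticks* parameter of rte_event_dequeue_burst(), falling back to
 * a non-blocking dequeue when timeouts are unsupported.
 *
 * @code
 *	uint64_t timeout_ticks = 0;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &timeout_ticks) < 0)
 *		timeout_ticks = 0;
 * @endcode
 */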
2002 
2003 /**
2004  * Link multiple source event queues supplied in *queues* to the destination
2005  * event port designated by its *port_id* with associated service priority
2006  * supplied in *priorities* on the event device designated by its *dev_id*.
2007  *
2008  * The link establishment shall enable the event port *port_id* to
2009  * receive events from the specified event queue(s) supplied in *queues*.
2010  *
2011  * An event queue may link to one or more event ports.
2012  * The number of links that can be established from an event queue to an event
2013  * port is implementation defined.
2014  *
2015  * Event queue(s) to event port link establishment can be changed at runtime
2016  * without re-configuring the device to support scaling and to reduce the
2017  * latency of critical work by establishing the link with more event ports
2018  * at runtime.
2019  *
2020  * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
2021  * than or equal to one, this function links the event queues to the default
2022  * profile_id i.e. profile_id 0 of the event port.
2023  *
2024  * @param dev_id
2025  *   The identifier of the device.
2026  *
2027  * @param port_id
2028  *   Event port identifier to select the destination port to link.
2029  *
2030  * @param queues
2031  *   Points to an array of *nb_links* event queues to be linked
2032  *   to the event port.
2033  *   NULL value is allowed, in which case this function links all the configured
2034  *   event queues *nb_event_queues* which were previously supplied to
2035  *   rte_event_dev_configure() to the event port *port_id*
2036  *
2037  * @param priorities
2038  *   Points to an array of *nb_links* service priorities associated with each
2039  *   event queue link to event port.
2040  *   The priority defines the event port's servicing priority for the
2041  *   event queue, which may be ignored by an implementation.
2042  *   The requested priority should be in the range of
2043  *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
2044  *   The implementation shall normalize the requested priority to an
2045  *   implementation supported priority value.
2046  *   NULL value is allowed, in which case this function links the event queues
2047  *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority.
2048  *
2049  * @param nb_links
2050  *   The number of links to establish. This parameter is ignored if queues is
2051  *   NULL.
2052  *
2053  * @return
2054  * The number of links actually established. The return value can be less than
2055  * the value of the *nb_links* parameter when the implementation has a
2056  * limitation on specific queue-to-port link establishment, or if invalid
2057  * parameters are specified in *queues*.
2058  * If the return value is less than *nb_links*, the remaining links at the end
2059  * of link[] are not established, and the caller has to take care of them.
2060  * If the return value is less than *nb_links*, the implementation shall update
2061  * rte_errno accordingly. Possible rte_errno values are:
2062  * (EDQUOT) Quota exceeded (the application tried to link a queue configured with
2063  *  RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
2064  * (EINVAL) Invalid parameter
2065  */
2066 int
2067 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
2068 		    const uint8_t queues[], const uint8_t priorities[],
2069 		    uint16_t nb_links);
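
/*
 * Illustrative sketch: linking two queues to a worker port, servicing
 * queue 0 at a higher priority than queue 1. Queue ids are assumptions
 * for this example only.
 *
 * @code
 *	const uint8_t queues[] = {0, 1};
 *	const uint8_t prios[] = {RTE_EVENT_DEV_PRIORITY_HIGHEST,
 *				 RTE_EVENT_DEV_PRIORITY_NORMAL};
 *
 *	if (rte_event_port_link(dev_id, port_id, queues, prios, 2) != 2)
 *		return -1; // inspect rte_errno for the cause
 * @endcode
 */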
2070 
2071 /**
2072  * Unlink multiple source event queues supplied in *queues* from the destination
2073  * event port designated by its *port_id* on the event device designated
2074  * by its *dev_id*.
2075  *
2076  * The unlink call issues an async request to disable the event port *port_id*
2077  * from receiving events from the specified event queue(s) supplied in *queues*.
2078  * Event queue(s) to event port unlink establishment can be changed at runtime
2079  * without re-configuring the device.
2080  *
2081  * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
2082  * than or equal to one, this function unlinks the event queues from the default
2083  * profile identifier i.e. profile 0 of the event port.
2084  *
2085  * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
2086  *
2087  * @param dev_id
2088  *   The identifier of the device.
2089  *
2090  * @param port_id
2091  *   Event port identifier to select the destination port to unlink.
2092  *
2093  * @param queues
2094  *   Points to an array of *nb_unlinks* event queues to be unlinked
2095  *   from the event port.
2096  *   NULL value is allowed, in which case this function unlinks all the
2097  *   event queue(s) from the event port *port_id*.
2098  *
2099  * @param nb_unlinks
2100  *   The number of unlinks to perform. This parameter is ignored if queues is
2101  *   NULL.
2102  *
2103  * @return
2104  * The number of unlinks successfully requested. The return value can be less
2105  * than the value of the *nb_unlinks* parameter when the implementation has a
2106  * limitation on specific queue-to-port unlink establishment, or
2107  * if invalid parameters are specified.
2108  * If the return value is less than *nb_unlinks*, the remaining queues at the
2109  * end of queues[] are not unlinked, and the caller has to take care of them.
2110  * If the return value is less than *nb_unlinks*, the implementation shall update
2111  * rte_errno accordingly. Possible rte_errno values are:
2112  * (EINVAL) Invalid parameter
2113  */
2114 int
2115 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
2116 		      uint8_t queues[], uint16_t nb_unlinks);
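
/*
 * Illustrative sketch: requesting an unlink of queue 0 and busy-waiting
 * until the potentially asynchronous operation completes.
 *
 * @code
 *	uint8_t q = 0;
 *
 *	if (rte_event_port_unlink(dev_id, port_id, &q, 1) != 1)
 *		return -1;
 *	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *		rte_pause();
 * @endcode
 */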
2117 
2118 /**
2119  * Link multiple source event queues supplied in *queues* to the destination
2120  * event port designated by its *port_id* with associated profile identifier
2121  * supplied in *profile_id* with service priorities supplied in *priorities*
2122  * on the event device designated by its *dev_id*.
2123  *
2124  * If *profile_id* is set to 0, the links created by the call `rte_event_port_link`
2125  * will be overwritten.
2126  *
2127  * Event ports by default use profile_id 0 unless it is changed using the
2128  * call ``rte_event_port_profile_switch()``.
2129  *
2130  * The link establishment shall enable the event port *port_id* to
2131  * receive events from the specified event queue(s) supplied in *queues*.
2132  *
2133  * An event queue may link to one or more event ports.
2134  * The number of links that can be established from an event queue to an event
2135  * port is implementation defined.
2136  *
2137  * Event queue(s) to event port link establishment can be changed at runtime
2138  * without re-configuring the device to support scaling and to reduce the
2139  * latency of critical work by establishing the link with more event ports
2140  * at runtime.
2141  *
2142  * @param dev_id
2143  *   The identifier of the device.
2144  *
2145  * @param port_id
2146  *   Event port identifier to select the destination port to link.
2147  *
2148  * @param queues
2149  *   Points to an array of *nb_links* event queues to be linked
2150  *   to the event port.
2151  *   NULL value is allowed, in which case this function links all the configured
2152  *   event queues *nb_event_queues* which were previously supplied to
2153  *   rte_event_dev_configure() to the event port *port_id*
2154  *
2155  * @param priorities
2156  *   Points to an array of *nb_links* service priorities associated with each
2157  *   event queue link to event port.
2158  *   The priority defines the event port's servicing priority for the
2159  *   event queue, which may be ignored by an implementation.
2160  *   The requested priority should be in the range of
2161  *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
2162  *   The implementation shall normalize the requested priority to an
2163  *   implementation supported priority value.
2164  *   NULL value is allowed, in which case this function links the event queues
2165  *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority.
2166  *
2167  * @param nb_links
2168  *   The number of links to establish. This parameter is ignored if queues is
2169  *   NULL.
2170  *
2171  * @param profile_id
2172  *   The profile identifier associated with the links between event queues and
2173  *   event port. Should be less than the max capability reported by
2174  *   ``rte_event_dev_info::max_profiles_per_port``
2175  *
2176  * @return
2177  * The number of links actually established. The return value can be less than
2178  * the value of the *nb_links* parameter when the implementation has a
2179  * limitation on specific queue-to-port link establishment, or if invalid
2180  * parameters are specified in *queues*.
2181  * If the return value is less than *nb_links*, the remaining links at the end
2182  * of link[] are not established, and the caller has to take care of them.
2183  * If the return value is less than *nb_links*, the implementation shall update
2184  * rte_errno accordingly. Possible rte_errno values are:
2185  * (EDQUOT) Quota exceeded (the application tried to link a queue configured with
2186  *  RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
2187  * (EINVAL) Invalid parameter
2188  *
2189  */
2190 __rte_experimental
2191 int
2192 rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
2193 				 const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);
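
/*
 * Illustrative sketch: populating a second link profile and switching the
 * port to it at runtime via rte_event_port_profile_switch(), the switch
 * API referenced above (its exact signature is an assumption here). Queue
 * and profile ids are assumptions for this example only, and the device
 * is assumed to report max_profiles_per_port >= 2.
 *
 * @code
 *	const uint8_t q = 1;
 *
 *	if (rte_event_port_profile_links_set(dev_id, port_id, &q, NULL,
 *			1, 1) != 1)
 *		return -1;
 *	// later, from the lcore owning the port:
 *	rte_event_port_profile_switch(dev_id, port_id, 1);
 * @endcode
 */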
2194 
2195 /**
2196  * Unlink multiple source event queues supplied in *queues* that belong to profile
2197  * designated by *profile_id* from the destination event port designated by its
2198  * *port_id* on the event device designated by its *dev_id*.
2199  *
2200  * If *profile_id* is set to 0, i.e. the default profile, then this function
2201  * will act as ``rte_event_port_unlink``.
2202  *
2203  * The unlink call issues an async request to disable the event port *port_id*
2204  * from receiving events from the specified event queue(s) supplied in *queues*.
2205  * Event queue(s) to event port unlink establishment can be changed at runtime
2206  * without re-configuring the device.
2207  *
2208  * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
2209  *
2210  * @param dev_id
2211  *   The identifier of the device.
2212  *
2213  * @param port_id
2214  *   Event port identifier to select the destination port to unlink.
2215  *
2216  * @param queues
2217  *   Points to an array of *nb_unlinks* event queues to be unlinked
2218  *   from the event port.
2219  *   NULL value is allowed, in which case this function unlinks all the
2220  *   event queue(s) from the event port *port_id*.
2221  *
2222  * @param nb_unlinks
2223  *   The number of unlinks to perform. This parameter is ignored if queues is
2224  *   NULL.
2225  *
2226  * @param profile_id
2227  *   The profile identifier associated with the links between event queues and
2228  *   event port. Should be less than the max capability reported by
2229  *   ``rte_event_dev_info::max_profiles_per_port``
2230  *
2231  * @return
2232  * The number of unlinks successfully requested. The return value can be less
2233  * than the value of the *nb_unlinks* parameter when the implementation has a
2234  * limitation on specific queue-to-port unlink establishment, or
2235  * if invalid parameters are specified.
2236  * If the return value is less than *nb_unlinks*, the remaining queues at the
2237  * end of queues[] are not unlinked, and the caller has to take care of them.
2238  * If the return value is less than *nb_unlinks*, the implementation shall update
2239  * rte_errno accordingly. Possible rte_errno values are:
2240  * (EINVAL) Invalid parameter
2241  *
2242  */
2243 __rte_experimental
2244 int
2245 rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
2246 			      uint16_t nb_unlinks, uint8_t profile_id);
2247 
2248 /**
2249  * Returns the number of unlinks in progress.
2250  *
2251  * This function provides the application with a method to detect when an
2252  * unlink has been completed by the implementation.
2253  *
2254  * @see rte_event_port_unlink() to issue unlink requests.
2255  *
2256  * @param dev_id
2257  *   The identifier of the device.
2258  *
2259  * @param port_id
2260  *   Event port identifier to select port to check for unlinks in progress.
2261  *
2262  * @return
2263  * The number of unlinks that are in progress. A return of zero indicates that
2264  * there are no outstanding unlink requests. A positive return value indicates
2265  * the number of unlinks that are in progress, but are not yet complete.
2266  * A negative return value indicates an error, -EINVAL indicates an invalid
2267  * parameter passed for *dev_id* or *port_id*.
2268  */
2269 int
2270 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);
2271 
2272 /**
2273  * Retrieve the list of source event queues and their associated service priorities
2274  * linked to the destination event port designated by its *port_id*
2275  * on the event device designated by its *dev_id*.
2276  *
2277  * @param dev_id
2278  *   The identifier of the device.
2279  *
2280  * @param port_id
2281  *   Event port identifier.
2282  *
2283  * @param[out] queues
2284  *   Points to an array of *queues* for output.
2285  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
2286  *   store the event queue(s) linked with event port *port_id*
2287  *
2288  * @param[out] priorities
2289  *   Points to an array of *priorities* for output.
2290  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
2291  *   store the service priority associated with each event queue linked
2292  *
2293  * @return
2294  * The number of links established on the event port designated by its
2295  *  *port_id*.
2296  * - <0 on failure.
2297  */
2298 int
2299 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
2300 			 uint8_t queues[], uint8_t priorities[]);
2301 
2302 /**
2303  * Retrieve the list of source event queues and their service priorities
2304  * associated with a *profile_id* and linked to the destination event port
2305  * designated by its *port_id* on the event device designated by its *dev_id*.
2306  *
2307  * @param dev_id
2308  *   The identifier of the device.
2309  *
2310  * @param port_id
2311  *   Event port identifier.
2312  *
2313  * @param[out] queues
2314  *   Points to an array of *queues* for output.
2315  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
2316  *   store the event queue(s) linked with event port *port_id*
2317  *
2318  * @param[out] priorities
2319  *   Points to an array of *priorities* for output.
2320  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
2321  *   store the service priority associated with each event queue linked
2322  *
2323  * @param profile_id
2324  *   The profile identifier associated with the links between event queues and
2325  *   event port. Should be less than the max capability reported by
2326  *   ``rte_event_dev_info::max_profiles_per_port``
2327  *
2328  * @return
2329  * The number of links established on the event port designated by its
2330  *  *port_id*.
2331  * - <0 on failure.
2332  */
2333 __rte_experimental
2334 int
2335 rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
2336 				 uint8_t priorities[], uint8_t profile_id);
2337 
2338 /**
2339  * Retrieve the service ID of the event dev. If the event dev doesn't use
2340  * an rte_service function, this function returns -ESRCH.
2341  *
2342  * @param dev_id
2343  *   The identifier of the device.
2344  *
2345  * @param [out] service_id
2346  *   A pointer to a uint32_t, to be filled in with the service id.
2347  *
2348  * @return
2349  *   - 0: Success
2350  *   - <0: Error code on failure, if the event dev doesn't use a rte_service
2351  *   function, this function returns -ESRCH.
2352  */
2353 int
2354 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
2355 
2356 /**
2357  * Dump internal information about *dev_id* to the FILE* provided in *f*.
2358  *
2359  * @param dev_id
2360  *   The identifier of the device.
2361  *
2362  * @param f
2363  *   A pointer to a file for output
2364  *
2365  * @return
2366  *   - 0: on success
2367  *   - <0: on failure.
2368  */
2369 int
2370 rte_event_dev_dump(uint8_t dev_id, FILE *f);
2371 
2372 /** Maximum name length for extended statistics counters */
2373 #define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
2374 
2375 /**
2376  * Selects the component of the eventdev to retrieve statistics from.
2377  */
2378 enum rte_event_dev_xstats_mode {
2379 	RTE_EVENT_DEV_XSTATS_DEVICE,
2380 	RTE_EVENT_DEV_XSTATS_PORT,
2381 	RTE_EVENT_DEV_XSTATS_QUEUE,
2382 };
2383 
2384 /**
2385  * A name-key lookup element for extended statistics.
2386  *
2387  * This structure is used to map between names and ID numbers
2388  * for extended eventdev statistics.
2389  */
2390 struct rte_event_dev_xstats_name {
2391 	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
2392 };
2393 
2394 /**
2395  * Retrieve names of extended statistics of an event device.
2396  *
2397  * @param dev_id
2398  *   The identifier of the event device.
2399  * @param mode
2400  *   The mode of statistics to retrieve. Choices include the device statistics,
2401  *   port statistics or queue statistics.
2402  * @param queue_port_id
2403  *   Used to specify the port or queue number in queue or port mode, and is
2404  *   ignored in device mode.
2405  * @param[out] xstats_names
2406  *   Block of memory to insert names into. Must hold at least *size* entries.
2407  *   If set to NULL, the function returns the required capacity.
2408  * @param[out] ids
2409  *   Block of memory to insert ids into. Must hold at least *size* entries.
2410  *   If set to NULL, the function returns the required capacity. The id values returned
2411  *   can be passed to *rte_event_dev_xstats_get* to select statistics.
2412  * @param size
2413  *   Capacity of xstats_names (number of names).
2414  * @return
2415  *   - positive value lower or equal to size: success. The return value
2416  *     is the number of entries filled in the stats table.
2417  *   - positive value higher than size: error, the given statistics table
2418  *     is too small. The return value corresponds to the size that should
2419  *     be given to succeed. The entries in the table are not valid and
2420  *     shall not be used by the caller.
2421  *   - negative value on error:
2422  *        -ENODEV for invalid *dev_id*
2423  *        -EINVAL for invalid mode, queue port or id parameters
2424  *        -ENOTSUP if the device doesn't support this function.
2425  */
2426 int
2427 rte_event_dev_xstats_names_get(uint8_t dev_id,
2428 			       enum rte_event_dev_xstats_mode mode,
2429 			       uint8_t queue_port_id,
2430 			       struct rte_event_dev_xstats_name *xstats_names,
2431 			       uint64_t *ids,
2432 			       unsigned int size);
2433 
2434 /**
2435  * Retrieve extended statistics of an event device.
2436  *
2437  * @param dev_id
2438  *   The identifier of the device.
2439  * @param mode
2440  *  The mode of statistics to retrieve. Choices include the device statistics,
2441  *  port statistics or queue statistics.
2442  * @param queue_port_id
2443  *   Used to specify the port or queue number in queue or port mode, and is
2444  *   ignored in device mode.
2445  * @param ids
2446  *   The id numbers of the stats to get. The ids can be taken from the stat
2447  *   position in the stat list from rte_event_dev_xstats_names_get(), or
2448  *   by using rte_event_dev_xstats_by_name_get().
2449  * @param[out] values
2450  *   The values for each stat requested, by ID.
2451  * @param n
2452  *   The number of stats requested.
2453  * @return
2454  *   - positive value: number of stat entries filled into the values array
2455  *   - negative value on error:
2456  *        -ENODEV for invalid *dev_id*
2457  *        -EINVAL for invalid mode, queue port or id parameters
2458  *        -ENOTSUP if the device doesn't support this function.
2459  */
2460 int
2461 rte_event_dev_xstats_get(uint8_t dev_id,
2462 			 enum rte_event_dev_xstats_mode mode,
2463 			 uint8_t queue_port_id,
2464 			 const uint64_t ids[],
2465 			 uint64_t values[], unsigned int n);
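
/*
 * Illustrative sketch: sizing and fetching all device-level xstats.
 * Allocation error handling is omitted for brevity.
 *
 * @code
 *	int num = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *	if (num <= 0)
 *		return num;
 *	struct rte_event_dev_xstats_name *names = malloc(num * sizeof(*names));
 *	uint64_t *ids = malloc(num * sizeof(*ids));
 *	uint64_t *values = malloc(num * sizeof(*values));
 *
 *	rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *			names, ids, num);
 *	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *			ids, values, num);
 * @endcode
 */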
2466 
2467 /**
2468  * Retrieve the value of a single stat by requesting it by name.
2469  *
2470  * @param dev_id
2471  *   The identifier of the device
2472  * @param name
2473  *   The stat name to retrieve
2474  * @param[out] id
2475  *   If non-NULL, the numerical id of the stat will be returned, so that further
2476  *   requests for the stat can be made using rte_event_dev_xstats_get(), which will
2477  *   be faster as it doesn't need to scan a list of names for the stat.
2478  *   If the stat cannot be found, the id returned will be (unsigned)-1.
2479  * @return
2480  *   - positive value or zero: the stat value
2481  *   - negative value: -EINVAL if stat not found, -ENOTSUP if not supported.
2482  */
2483 uint64_t
2484 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
2485 				 uint64_t *id);
2486 
2487 /**
2488  * Reset the values of the xstats of the selected component in the device.
2489  *
2490  * @param dev_id
2491  *   The identifier of the device
2492  * @param mode
2493  *   The mode of the statistics to reset. Choose from device, queue or port.
2494  * @param queue_port_id
2495  *   The queue or port to reset. 0 and positive values select ports and queues,
2496  *   while -1 indicates all ports or queues.
2497  * @param ids
2498  *   Selects specific statistics to be reset. When NULL, all statistics selected
2499  *   by *mode* will be reset. If non-NULL, must point to an array of at least
2500  *   *nb_ids* entries.
2501  * @param nb_ids
2502  *   The number of ids available from the *ids* array. Ignored when ids is NULL.
2503  * @return
2504  *   - zero: successfully reset the statistics to zero
2505  *   - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported.
2506  */
2507 int
2508 rte_event_dev_xstats_reset(uint8_t dev_id,
2509 			   enum rte_event_dev_xstats_mode mode,
2510 			   int16_t queue_port_id,
2511 			   const uint64_t ids[],
2512 			   uint32_t nb_ids);
2513 
2514 /**
2515  * Trigger the eventdev self test.
2516  *
2517  * @param dev_id
2518  *   The identifier of the device
2519  * @return
2520  *   - 0: Selftest successful
2521  *   - -ENOTSUP if the device doesn't support selftest
2522  *   - other values < 0 on failure.
2523  */
2524 int rte_event_dev_selftest(uint8_t dev_id);
2525 
2526 /**
2527  * Get the memory required per event vector based on the number of elements per
2528  * vector.
2529  * This should be used to create the mempool that holds the event vectors.
2530  *
2531  * @param name
2532  *   The name of the vector pool.
2533  * @param n
2534  *   The number of elements in the vector pool.
2535  * @param cache_size
2536  *   Size of the per-core object cache. See rte_mempool_create() for
2537  *   details.
2538  * @param nb_elem
2539  *   The number of elements that a single event vector should be able to hold.
2540  * @param socket_id
2541  *   The socket identifier where the memory should be allocated. The
2542  *   value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
2543  *   reserved zone
2544  *
2545  * @return
2546  *   The pointer to the newly allocated mempool, on success. NULL on error
2547  *   with rte_errno set appropriately. Possible rte_errno values include:
2548  *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
2549  *    - E_RTE_SECONDARY - function was called from a secondary process instance
2550  *    - EINVAL - cache size provided is too large, or priv_size is not aligned.
2551  *    - ENOSPC - the maximum number of memzones has already been allocated
2552  *    - EEXIST - a memzone with the same name already exists
2553  *    - ENOMEM - no appropriate memory area found in which to create memzone
2554  *    - ENAMETOOLONG - mempool name requested is too long.
2555  */
2556 struct rte_mempool *
2557 rte_event_vector_pool_create(const char *name, unsigned int n,
2558 			     unsigned int cache_size, uint16_t nb_elem,
2559 			     int socket_id);
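
/*
 * Example (editor's sketch): create a pool of 8K event vectors, each able to
 * carry up to 64 events, with a 256-object per-lcore cache. The pool name and
 * sizes are illustrative assumptions only; on failure the function returns
 * NULL and sets rte_errno as documented above.
 */
static inline struct rte_mempool *
example_create_vector_pool(void)
{
	return rte_event_vector_pool_create("ev_vec_pool", 8192, 256, 64,
					    SOCKET_ID_ANY);
}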

#include <rte_eventdev_core.h>

#ifdef __cplusplus
extern "C" {
#endif

static __rte_always_inline uint16_t
__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
			  const struct rte_event ev[], uint16_t nb_events,
			  const event_enqueue_burst_t fn)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Validate the ids before they are used to index the ops array. */
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
	/*
	 * Allow zero-cost invocation of the non-burst routine when the
	 * application passes nb_events as a compile-time constant of one.
	 */
	if (nb_events == 1)
		return (fp_ops->enqueue)(port, ev);
	else
		return fn(port, ev, nb_events);
}

/**
 * Enqueue a burst of event objects, or a single event object, supplied in
 * *rte_event* structures on an event device designated by its *dev_id*
 * through the event port specified by *port_id*. Each event object specifies
 * the event queue on which it will be enqueued.
 *
 * The *nb_events* parameter is the number of event objects to enqueue, which
 * are supplied in the *ev* array of *rte_event* structures.
 *
 * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
 * enqueued to the same port that their associated events were dequeued from.
 *
 * The rte_event_enqueue_burst() function returns the number of
 * event objects it actually enqueued. A return value equal to *nb_events*
 * means that all event objects have been enqueued.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param ev
 *   Points to an array of *nb_events* objects of type *rte_event* structure
 *   which contain the event object enqueue operations to be processed.
 * @param nb_events
 *   The number of event objects to enqueue, typically up to the enqueue depth
 *   reported by rte_event_port_attr_get() with RTE_EVENT_PORT_ATTR_ENQ_DEPTH
 *   for this port.
 *
 * @return
 *   The number of event objects actually enqueued on the event device. The
 *   return value can be less than the value of the *nb_events* parameter when
 *   the event device's queue is full or if invalid parameters are specified in
 *   a *rte_event*. If the return value is less than *nb_events*, the remaining
 *   events at the end of ev[] are not consumed and the caller has to take care
 *   of them, and rte_errno is set accordingly. Possible errno values include:
 *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
 *              ID is invalid, or an event's sched type doesn't match the
 *              capabilities of the destination queue.
 *   - ENOSPC   The event port was backpressured and unable to enqueue
 *              one or more events. This error code is only applicable to
 *              closed systems.
 * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
 */
static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
			const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
					 fp_ops->enqueue_burst);
}
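
/*
 * Example (editor's sketch): enqueue a burst and retry any tail the device
 * did not accept. A production loop would typically bound the retries or
 * drop events on persistent back-pressure; this sketch simply spins.
 */
static inline void
example_enqueue_all(uint8_t dev_id, uint8_t port_id,
		    const struct rte_event ev[], uint16_t nb_events)
{
	uint16_t sent = 0;

	while (sent < nb_events) {
		uint16_t n = rte_event_enqueue_burst(dev_id, port_id,
						     &ev[sent],
						     nb_events - sent);
		if (n == 0 && rte_errno != ENOSPC)
			break; /* hard error, e.g. EINVAL; give up */
		sent += n;
	}
}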

/**
 * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_NEW* on
 * an event device designated by its *dev_id* through the event port specified
 * by *port_id*.
 *
 * Provides the same functionality as rte_event_enqueue_burst(), except that
 * the application can use this API when all objects in the burst contain
 * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
 * function provides an additional hint to the PMD, which may use it to
 * optimize the enqueue if possible.
 *
 * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
 * has an event object of operation type != RTE_EVENT_OP_NEW.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param ev
 *   Points to an array of *nb_events* objects of type *rte_event* structure
 *   which contain the event object enqueue operations to be processed.
 * @param nb_events
 *   The number of event objects to enqueue, typically up to the enqueue depth
 *   reported by rte_event_port_attr_get() with RTE_EVENT_PORT_ATTR_ENQ_DEPTH
 *   for this port.
 *
 * @return
 *   The number of event objects actually enqueued on the event device. The
 *   return value can be less than the value of the *nb_events* parameter when
 *   the event device's queue is full or if invalid parameters are specified in
 *   a *rte_event*. If the return value is less than *nb_events*, the remaining
 *   events at the end of ev[] are not consumed and the caller has to take care
 *   of them, and rte_errno is set accordingly. Possible errno values include:
 *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
 *              ID is invalid, or an event's sched type doesn't match the
 *              capabilities of the destination queue.
 *   - ENOSPC   The event port was backpressured and unable to enqueue
 *              one or more events. This error code is only applicable to
 *              closed systems.
 * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
 * @see rte_event_enqueue_burst()
 */
static inline uint16_t
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
			    const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
					 fp_ops->enqueue_new_burst);
}

/**
 * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_FORWARD*
 * on an event device designated by its *dev_id* through the event port
 * specified by *port_id*.
 *
 * Provides the same functionality as rte_event_enqueue_burst(), except that
 * the application can use this API when all objects in the burst contain
 * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
 * function provides an additional hint to the PMD, which may use it to
 * optimize the enqueue if possible.
 *
 * The rte_event_enqueue_forward_burst() result is undefined if the enqueue
 * burst has an event object of operation type != RTE_EVENT_OP_FORWARD.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param ev
 *   Points to an array of *nb_events* objects of type *rte_event* structure
 *   which contain the event object enqueue operations to be processed.
 * @param nb_events
 *   The number of event objects to enqueue, typically up to the enqueue depth
 *   reported by rte_event_port_attr_get() with RTE_EVENT_PORT_ATTR_ENQ_DEPTH
 *   for this port.
 *
 * @return
 *   The number of event objects actually enqueued on the event device. The
 *   return value can be less than the value of the *nb_events* parameter when
 *   the event device's queue is full or if invalid parameters are specified in
 *   a *rte_event*. If the return value is less than *nb_events*, the remaining
 *   events at the end of ev[] are not consumed and the caller has to take care
 *   of them, and rte_errno is set accordingly. Possible errno values include:
 *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
 *              ID is invalid, or an event's sched type doesn't match the
 *              capabilities of the destination queue.
 *   - ENOSPC   The event port was backpressured and unable to enqueue
 *              one or more events. This error code is only applicable to
 *              closed systems.
 * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
 * @see rte_event_enqueue_burst()
 */
static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
				const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
					 fp_ops->enqueue_forward_burst);
}

/**
 * Dequeue a burst of event objects, or a single event object, from the event
 * port designated by its *event_port_id*, on an event device designated
 * by its *dev_id*.
 *
 * rte_event_dequeue_burst() does not dictate the specifics of the scheduling
 * algorithm, as each eventdev driver may have different criteria to schedule
 * an event. However, in general, from an application perspective the scheduler
 * may use the following scheme to dispatch an event to the port.
 *
 * 1) Selection of event queue based on
 *   a) The list of event queues linked to the event port.
 *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability, then event
 *   queue selection from the list is based on event queue priority relative to
 *   other event queues, supplied as *priority* in rte_event_queue_setup().
 *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability, then event
 *   queue selection from the list is based on event priority supplied as
 *   *priority* in rte_event_enqueue_burst().
 * 2) Selection of event
 *   a) The number of flows available in the selected event queue.
 *   b) The schedule type method associated with the event.
 *
 * The *nb_events* parameter is the maximum number of event objects to dequeue,
 * which are returned in the *ev* array of *rte_event* structures.
 *
 * The rte_event_dequeue_burst() function returns the number of event objects
 * it actually dequeued. A return value equal to *nb_events* means that all
 * event objects have been dequeued.
 *
 * The number of events dequeued is the number of scheduler contexts held by
 * this port. These contexts are automatically released in the next
 * rte_event_dequeue_burst() invocation if the port supports implicit
 * releases, or they can be released early by invoking
 * rte_event_enqueue_burst() with the RTE_EVENT_OP_RELEASE operation.
 *
 * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
 * enqueued to the same port that their associated events were dequeued from.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param[out] ev
 *   Points to an array of *nb_events* objects of type *rte_event* structure
 *   for output to be populated with the dequeued event objects.
 * @param nb_events
 *   The maximum number of event objects to dequeue, typically up to the value
 *   returned by rte_event_port_dequeue_depth() for this port.
 *
 * @param timeout_ticks
 *   - 0 no-wait, returns immediately if there is no event.
 *   - >0 wait for the event. If the device is configured with
 *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT, then this function will wait until
 *   at least one event is available or *timeout_ticks* time has elapsed.
 *   If the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT,
 *   then this function will wait until an event is available or the
 *   *dequeue_timeout_ns* ns which was previously supplied to
 *   rte_event_dev_configure() has elapsed.
 *
 * @return
 * The number of event objects actually dequeued from the port. The return
 * value can be less than the value of the *nb_events* parameter when fewer
 * events are available on the port's linked queues.
 *
 * @see rte_event_port_dequeue_depth()
 */
static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
			uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Validate the ids before they are used to index the ops array. */
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
	/*
	 * Allow zero-cost invocation of the non-burst routine when the
	 * application passes nb_events as a compile-time constant of one.
	 */
	if (nb_events == 1)
		return (fp_ops->dequeue)(port, ev, timeout_ticks);
	else
		return (fp_ops->dequeue_burst)(port, ev, nb_events,
					       timeout_ticks);
}
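
/*
 * Example (editor's sketch): one iteration of a minimal worker loop. It polls
 * for a burst, processes each event, rewrites it as an RTE_EVENT_OP_FORWARD
 * operation targeting the next pipeline stage, and enqueues the burst back on
 * the same port it was dequeued from, as the API requires. The next-stage
 * queue id and the processing step are assumptions of this sketch; a real
 * loop would also retry unsent events, as in example_enqueue_all() above.
 */
static inline void
example_worker_iteration(uint8_t dev_id, uint8_t port_id,
			 uint8_t next_queue_id)
{
	struct rte_event ev[32];
	uint16_t i, n;

	/* Poll without waiting (timeout_ticks = 0). */
	n = rte_event_dequeue_burst(dev_id, port_id, ev, RTE_DIM(ev), 0);
	for (i = 0; i < n; i++) {
		/* ... application-specific processing of ev[i] ... */
		ev[i].op = RTE_EVENT_OP_FORWARD;
		ev[i].queue_id = next_queue_id;
	}
	if (n > 0)
		(void)rte_event_enqueue_burst(dev_id, port_id, ev, n);
}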

#define RTE_EVENT_DEV_MAINT_OP_FLUSH          (1 << 0)
/**< Force an immediate flush of any buffered events in the port,
 * potentially at the cost of additional overhead.
 *
 * @see rte_event_maintain()
 */

/**
 * Maintain an event device.
 *
 * This function is only relevant for event devices which do not have
 * the @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE flag set. Such devices
 * require an application thread using a particular port to
 * periodically call rte_event_maintain() on that port during periods
 * in which it is neither attempting to enqueue events to nor dequeue
 * events from the port. rte_event_maintain() is a low-overhead
 * function and should be called at a high rate (e.g., in the
 * application's poll loop).
 *
 * No port may be left unmaintained.
 *
 * At the application thread's convenience, rte_event_maintain() may
 * (but is not required to) be called even during periods when enqueue
 * or dequeue functions are being called, at the cost of a slight
 * increase in overhead.
 *
 * rte_event_maintain() may be called on event devices which have set
 * @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, in which case it is a
 * no-operation.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param op
 *   0, or @ref RTE_EVENT_DEV_MAINT_OP_FLUSH.
 * @return
 *  - 0 on success.
 *  - -EINVAL if *dev_id*, *port_id*, or *op* is invalid.
 *
 * @see RTE_EVENT_DEV_CAP_MAINTENANCE_FREE
 */
static inline int
rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Validate the ids before they are used to index the ops array. */
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
		return -EINVAL;
#endif
	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (port == NULL)
		return -EINVAL;
#endif
	rte_eventdev_trace_maintain(dev_id, port_id, op);

	if (fp_ops->maintain != NULL)
		fp_ops->maintain(port, op);

	return 0;
}
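
/*
 * Example (editor's sketch): keep a port maintained while the worker thread
 * has no events to enqueue or dequeue, e.g. while waiting on an unrelated
 * condition. The stop flag is an assumption of this sketch.
 */
static inline void
example_idle_maintain(uint8_t dev_id, uint8_t port_id,
		      volatile const int *stop)
{
	while (!*stop) {
		/* A no-op on RTE_EVENT_DEV_CAP_MAINTENANCE_FREE devices. */
		(void)rte_event_maintain(dev_id, port_id, 0);
		/* ... other idle-time work ... */
	}
}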

/**
 * Change the active profile on an event port.
 *
 * This function is used to change the current active profile on an event port
 * when multiple link profiles are configured on an event port through the
 * function call ``rte_event_port_profile_links_set``.
 *
 * On the subsequent ``rte_event_dequeue_burst`` call, only the event queues
 * that were associated with the newly active profile will participate in
 * scheduling.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param profile_id
 *   The identifier of the profile.
 * @return
 *  - 0 on success.
 *  - -EINVAL if *dev_id*, *port_id*, or *profile_id* is invalid.
 */
static inline int
rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Validate the ids before they are used to index the ops array. */
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
		return -EINVAL;
#endif
	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (port == NULL)
		return -EINVAL;
#endif
	rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);

	return fp_ops->profile_switch(port, profile_id);
}
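
/*
 * Example (editor's sketch): populate two link profiles at setup time with
 * rte_event_port_profile_links_set() (declared earlier in this file), then
 * flip the port to profile 1 at runtime. Queue ids 0 and 1, single-queue
 * profiles, and default (NULL) priorities are assumptions of this sketch.
 */
static inline void
example_profile_setup_and_switch(uint8_t dev_id, uint8_t port_id)
{
	uint8_t q0 = 0, q1 = 1;

	/* Profile 0 serves queue 0; profile 1 serves queue 1. */
	(void)rte_event_port_profile_links_set(dev_id, port_id, &q0, NULL, 1, 0);
	(void)rte_event_port_profile_links_set(dev_id, port_id, &q1, NULL, 1, 1);

	/* Subsequent dequeues on this port see only queue 1. */
	(void)rte_event_port_profile_switch(dev_id, port_id, 1);
}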

/**
 * Modify the pre-schedule type to use on an event port.
 *
 * This function is used to change the current pre-schedule type configured
 * on an event port; the pre-schedule type can be set to none to disable
 * pre-scheduling. This affects the subsequent ``rte_event_dequeue_burst``
 * call. The event device should support the
 * RTE_EVENT_DEV_CAP_PER_PORT_PRESCHEDULE capability.
 *
 * To avoid fast-path capability checks, if an event device does not support
 * the RTE_EVENT_DEV_CAP_PER_PORT_PRESCHEDULE capability, this function
 * returns -ENOTSUP.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param type
 *   The pre-schedule type to use on the event port.
 * @return
 *  - 0 on success.
 *  - -EINVAL if *dev_id*, *port_id*, or *type* is invalid.
 *  - -ENOTSUP if the device does not support the per-port pre-schedule
 *    capability.
 */
__rte_experimental
static inline int
rte_event_port_preschedule_modify(uint8_t dev_id, uint8_t port_id,
				  enum rte_event_dev_preschedule_type type)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Validate the ids before they are used to index the ops array. */
	if (dev_id >= RTE_EVENT_MAX_DEVS || port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;
#endif
	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (port == NULL)
		return -EINVAL;
#endif
	rte_eventdev_trace_port_preschedule_modify(dev_id, port_id, type);

	return fp_ops->preschedule_modify(port, type);
}
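
/*
 * Example (editor's sketch): disable pre-scheduling on a port ahead of a
 * latency-sensitive phase, assuming the RTE_EVENT_PRESCHEDULE_NONE
 * enumerator defined earlier in this file and a device that reports the
 * RTE_EVENT_DEV_CAP_PER_PORT_PRESCHEDULE capability.
 */
static inline int
example_disable_prescheduling(uint8_t dev_id, uint8_t port_id)
{
	return rte_event_port_preschedule_modify(dev_id, port_id,
						 RTE_EVENT_PRESCHEDULE_NONE);
}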

#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENTDEV_H_ */