/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc.
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright 2016 NXP
 * All rights reserved.
 */

#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_

/**
 * @file
 *
 * RTE Event Device API
 * ====================
 *
 * In a traditional DPDK application model, the application polls Ethdev port RX
 * queues to look for work, and processing is done in a run-to-completion manner,
 * after which the packets are transmitted on an Ethdev TX queue. Load is
 * distributed by statically assigning ports and queues to lcores, and NIC
 * receive-side scaling (RSS), or similar, is employed to distribute network flows
 * (and thus work) on the same port across multiple RX queues.
 *
 * In contrast, in an event-driven model, as supported by this "eventdev" library,
 * incoming packets (or other input events) are fed into an event device, which
 * schedules those packets across the available lcores, in accordance with its configuration.
 * This event-driven programming model offers applications automatic multicore scaling,
 * dynamic load balancing, pipelining, packet order maintenance, synchronization,
 * and prioritization/quality of service.
 *
 * The Event Device API is composed of two parts:
 *
 * - The application-oriented Event API that includes functions to setup
 *   an event device (configure it, setup its queues, ports and start it), to
 *   establish the links between queues and ports to receive events, and so on.
 *
 * - The driver-oriented Event API that exports a function allowing
 *   an event Poll Mode Driver (PMD) to register itself as
 *   an event device driver.
 *
 * Application-oriented Event API
 * ------------------------------
 *
 * Event device components:
 *
 *                     +-----------------+
 *                     | +-------------+ |
 *        +-------+    | |    flow 0   | |
 *        |Packet |    | +-------------+ |
 *        |event  |    | +-------------+ |
 *        |       |    | |    flow 1   | |port_link(port0, queue0)
 *        +-------+    | +-------------+ |     |     +--------+
 *        +-------+    | +-------------+ o-----v-----o        |dequeue +------+
 *        |Crypto |    | |    flow n   | |           | event  +------->|Core 0|
 *        |work   |    | +-------------+ o----+      | port 0 |        |      |
 *        |done ev|    |  event queue 0  |    |      +--------+        +------+
 *        +-------+    +-----------------+    |
 *        +-------+                           |
 *        |Timer  |    +-----------------+    |      +--------+
 *        |expiry |    | +-------------+ |    +------o        |dequeue +------+
 *        |event  |    | |    flow 0   | o-----------o event  +------->|Core 1|
 *        +-------+    | +-------------+ |      +----o port 1 |        |      |
 *       Event enqueue | +-------------+ |      |    +--------+        +------+
 *     o-------------> | |    flow 1   | |      |
 *        enqueue(     | +-------------+ |      |
 *        queue_id,    |                 |      |    +--------+        +------+
 *        flow_id,     | +-------------+ |      |    |        |dequeue |Core 2|
 *        sched_type,  | |    flow n   | o-----------o event  +------->|      |
 *        event_type,  | +-------------+ |      |    | port 2 |        +------+
 *        subev_type,  |  event queue 1  |      |    +--------+
 *        event)       +-----------------+      |    +--------+
 *                                              |    |        |dequeue +------+
 *        +-------+    +-----------------+      |    | event  +------->|Core n|
 *        |Core   |    | +-------------+ o-----------o port n |        |      |
 *        |(SW)   |    | |    flow 0   | |      |    +--------+        +--+---+
 *        |event  |    | +-------------+ |      |                         |
 *        +-------+    | +-------------+ |      |                         |
 *            ^        | |    flow 1   | |      |                         |
 *            |        | +-------------+ o------+                         |
 *            |        | +-------------+ |                                |
 *            |        | |    flow n   | |                                |
 *            |        | +-------------+ |                                |
 *            |        |  event queue n  |                                |
 *            |        +-----------------+                                |
 *            |                                                           |
 *            +-----------------------------------------------------------+
 *
 * **Event device**: A hardware or software-based event scheduler.
 *
 * **Event**: Represents an item of work and is the smallest unit of scheduling.
 * An event carries metadata, such as queue ID, scheduling type, and event priority,
 * and data such as one or more packets or other kinds of buffers.
 * Some examples of events are:
 * - a software-generated item of work originating from an lcore,
 *   perhaps carrying a packet to be processed.
 * - a crypto work completion notification.
 * - a timer expiry notification.
 *
 * **Event queue**: A queue containing events that are to be scheduled by the event device.
 * An event queue contains events of different flows associated with scheduling
 * types, such as atomic, ordered, or parallel.
 * Each event given to an event device must have a valid event queue id field in the metadata,
 * to specify on which event queue in the device the event must be placed,
 * for later scheduling.
 *
 * **Event port**: An application's interface into the event dev for enqueue and
 * dequeue operations. Each event port can be linked with one or more
 * event queues for dequeue operations.
 * Enqueue and dequeue from a port are not thread-safe, and the expected use-case is
 * that each port is polled by only a single lcore. [If this is not the case,
 * a suitable synchronization mechanism should be used to prevent simultaneous
 * access from multiple lcores.]
 * To schedule events to an lcore, the event device will schedule them to the event port(s)
 * being polled by that lcore.
 *
 * *NOTE*: By default, all the functions of the Event Device API exported by a PMD
 * are non-thread-safe functions, which must not be invoked on the same object in parallel on
 * different logical cores.
 * For instance, the dequeue function of a PMD cannot be invoked in parallel on two logical
 * cores to operate on the same event port. Of course, this function
 * can be invoked in parallel by different logical cores on different ports.
 * It is the responsibility of the upper level application to enforce this rule.
 *
 * In all functions of the Event API, the Event device is
 * designated by an integer >= 0 named the device identifier *dev_id*.
 *
 * The functions exported by the application Event API to setup a device
 * must be invoked in the following order:
 *     - rte_event_dev_configure()
 *     - rte_event_queue_setup()
 *     - rte_event_port_setup()
 *     - rte_event_port_link()
 *     - rte_event_dev_start()
 *
 * Then, the application can invoke, in any order, the functions
 * exported by the Event API to dequeue events, enqueue events,
 * and link and unlink event queue(s) to event ports.
 *
 * Before configuring a device, an application should call rte_event_dev_info_get()
 * to determine the capabilities of the event device, and any queue or port
 * limits of that device. The parameters set in the various device configuration
 * structures may need to be adjusted based on the max values provided in the
 * device information structure returned from the rte_event_dev_info_get() API.
 * An application may use rte_event_queue_default_conf_get() or
 * rte_event_port_default_conf_get() to get the default configuration
 * to set up an event queue or event port by overriding a few default values.
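 *
 * For illustration, below is a minimal sketch of a single-queue, single-port
 * device setup, following the call order above (error handling omitted;
 * *dev_id* is assumed to identify a valid device). Passing NULL to the setup
 * functions requests the default configuration, and passing a NULL queue list
 * to rte_event_port_link() links all configured queues to the port:
 *
 * \code{.c}
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.nb_event_queues = 1;
 *	cfg.nb_event_ports = 1;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	rte_event_dev_configure(dev_id, &cfg);
 *
 *	rte_event_queue_setup(dev_id, 0, NULL);
 *	rte_event_port_setup(dev_id, 0, NULL);
 *	rte_event_port_link(dev_id, 0, NULL, NULL, 0);
 *	rte_event_dev_start(dev_id);
 * \endcode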
 *
 * If the application wants to change the configuration (i.e. call
 * rte_event_dev_configure(), rte_event_queue_setup(), or
 * rte_event_port_setup()), it must call rte_event_dev_stop() first to stop the
 * device and then do the reconfiguration before calling rte_event_dev_start()
 * again. The schedule, enqueue and dequeue functions should not be invoked
 * when the device is stopped.
 *
 * Finally, an application can close an Event device by invoking the
 * rte_event_dev_close() function. Once closed, a device cannot be
 * reconfigured or restarted.
 *
 * Driver-Oriented Event API
 * -------------------------
 *
 * At the Event driver level, Event devices are represented by a generic
 * data structure of type *rte_event_dev*.
 *
 * Event devices are dynamically registered during the PCI/SoC device probing
 * phase performed at EAL initialization time.
 * When an Event device is being probed, an *rte_event_dev* structure is allocated
 * for it and the event_dev_init() function supplied by the Event driver
 * is invoked to properly initialize the device.
 *
 * The role of the device init function is to reset the device hardware or
 * to initialize the software event driver implementation.
 *
 * If the device init operation is successful, the device is assigned a device
 * id (dev_id) for application use.
 * Otherwise, the *rte_event_dev* structure is freed.
 *
 * Each function of the application Event API invokes a specific function
 * of the PMD that controls the target device designated by its device
 * identifier.
 *
 * For this purpose, all device-specific functions of an Event driver are
 * supplied through a set of pointers contained in a generic structure of type
 * *event_dev_ops*.
 * The address of the *event_dev_ops* structure is stored in the *rte_event_dev*
 * structure by the device init function of the Event driver, which is
 * invoked during the PCI/SoC device probing phase, as explained earlier.
 *
 * In other words, each function of the Event API simply retrieves the
 * *rte_event_dev* structure associated with the device identifier and
 * performs an indirect invocation of the corresponding driver function
 * supplied in the *event_dev_ops* structure of the *rte_event_dev* structure.
 *
 * For performance reasons, the addresses of the fast-path functions of the
 * event driver are not contained in the *event_dev_ops* structure.
 * Instead, they are directly stored at the beginning of the *rte_event_dev*
 * structure to avoid an extra indirect memory access during their invocation.
 *
 * Event Enqueue, Dequeue and Scheduling
 * -------------------------------------
 *
 * RTE event device drivers do not use interrupts for enqueue or dequeue
 * operations. Instead, Event drivers export Poll-Mode enqueue and dequeue
 * functions to applications.
 *
 * Events are injected into the event device through the *enqueue* operation by
 * event producers in the system. Typical event producers include the ethdev
 * subsystem, generating packet events; the CPU (SW), generating events based
 * on different stages of application processing; and cryptodev, generating
 * crypto work completion notifications.
 *
 * The *dequeue* operation gets one or more events from the event ports.
 * The application processes the events and sends them to a downstream event queue through
 * rte_event_enqueue_burst(), if it is an intermediate stage of event processing.
 * On the final stage of processing, the application may use the Tx adapter API for maintaining
 * the event ingress order while sending the packet/event on the wire via NIC Tx.
 *
 * The point at which events are scheduled to ports depends on the device.
 * For hardware devices, scheduling occurs asynchronously without any software
 * intervention. Software schedulers can either be distributed
 * (each worker thread schedules events to its own port) or centralized
 * (a dedicated thread schedules to all ports). Distributed software schedulers
 * perform the scheduling inside the enqueue or dequeue functions, whereas centralized
 * software schedulers need a dedicated service core for scheduling.
 * The absence of the RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag
 * indicates that the device is centralized and thus needs a dedicated scheduling
 * thread (generally an RTE service that should be mapped to one or more service cores)
 * that repeatedly calls the software specific scheduling function.
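 *
 * A sketch of driving such a centralized scheduler from a service core
 * (assuming *service_lcore* has already been added as a service core,
 * e.g. via the EAL "-S" option or rte_service_lcore_add()):
 *
 * \code{.c}
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		rte_service_runstate_set(service_id, 1);
 *		rte_service_map_lcore_set(service_id, service_lcore, 1);
 *		rte_service_lcore_start(service_lcore);
 *	}
 * \endcode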
 *
 * An event-driven worker thread has the following typical workflow on the fastpath
 * (*BURST_SIZE*, *dev_id*, *port_id* and *timeout* are application definitions):
 * \code{.c}
 *	struct rte_event ev[BURST_SIZE];
 *	uint16_t nb;
 *
 *	while (1) {
 *		nb = rte_event_dequeue_burst(dev_id, port_id, ev, BURST_SIZE, timeout);
 *		// (event processing)
 *		rte_event_enqueue_burst(dev_id, port_id, ev, nb);
 *	}
 * \endcode
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "rte_eventdev_trace_fp.h"

struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
struct rte_event;

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS           (1ULL << 0)
/**< Event scheduling prioritization is based on the priority and weight
 * associated with each event queue.
 *
 * Events from the queue with the highest priority
 * are scheduled first. If queues are of the same priority, the weights of the
 * queues are considered to select a queue in a weighted round-robin fashion.
 * Subsequent dequeue calls from an event port could see events from the same
 * event queue, if the queue is configured with an affinity count. The affinity
 * count is the number of subsequent dequeue calls in which an event port
 * should use the same event queue if the queue is non-empty.
 *
 * NOTE: A device may use both queue prioritization and event prioritization
 * (@ref RTE_EVENT_DEV_CAP_EVENT_QOS capability) when making packet scheduling decisions.
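 *
 * A sketch of requesting high priority and weight when setting up a queue
 * (assuming *dev_id* and *queue_id* are valid, and that the device reports
 * this capability):
 *
 * \code{.c}
 *	struct rte_event_queue_conf conf;
 *
 *	rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
 *	conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
 *	conf.weight = RTE_EVENT_QUEUE_WEIGHT_HIGHEST;
 *	rte_event_queue_setup(dev_id, queue_id, &conf);
 * \endcode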
 *
 *  @see rte_event_queue_setup()
 *  @see rte_event_queue_attr_set()
 */
#define RTE_EVENT_DEV_CAP_EVENT_QOS           (1ULL << 1)
/**< Event scheduling prioritization is based on the priority associated with
 *  each event.
 *
 *  Priority of each event is supplied in the *rte_event* structure
 *  on each enqueue operation.
 *  If this capability is not set, the priority field of the event structure
 *  is ignored for each event.
 *
 * NOTE: A device may use both queue prioritization (@ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability)
 * and event prioritization when making packet scheduling decisions.
 *
 *  @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED   (1ULL << 2)
/**< Event device operates in distributed scheduling mode.
 *
 * In distributed scheduling mode, event scheduling happens in HW, in
 * rte_event_dequeue_burst() / rte_event_enqueue_burst(), or in a combination of the two.
 * If the flag is not set, then the eventdev is centralized and thus needs a
 * dedicated service core that acts as a scheduling thread.
 *
 * @see rte_event_dev_service_id_get()
 */
#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES     (1ULL << 3)
/**< Event device is capable of accepting enqueued events, of any type
 * advertised as supported by the device, to all destination queues.
 *
 * When this capability is set, and the @ref RTE_EVENT_QUEUE_CFG_ALL_TYPES flag is set
 * in @ref rte_event_queue_conf.event_queue_cfg, the "schedule_type" field of the
 * @ref rte_event_queue_conf structure is ignored when a queue is being configured.
 * Instead, the "sched_type" field of each event enqueued is used to
 * select the scheduling to be performed on that event.
 *
 * If this capability is not set, or the configuration flag is not set,
 * the queue only supports events of the *RTE_SCHED_TYPE_* type specified
 * in the @ref rte_event_queue_conf structure at the time of configuration.
 * The behaviour when events of other scheduling types are sent to the queue is
 * undefined.
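 *
 * A sketch of configuring a queue to accept any supported scheduling type
 * per event (assuming the device reports this capability):
 *
 * \code{.c}
 *	struct rte_event_queue_conf conf;
 *
 *	rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
 *	conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
 *	rte_event_queue_setup(dev_id, queue_id, &conf);
 * \endcode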
 *
 * @see RTE_EVENT_QUEUE_CFG_ALL_TYPES
 * @see RTE_SCHED_TYPE_ATOMIC
 * @see RTE_SCHED_TYPE_ORDERED
 * @see RTE_SCHED_TYPE_PARALLEL
 * @see rte_event_queue_conf.event_queue_cfg
 * @see rte_event_queue_conf.schedule_type
 * @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_DEV_CAP_BURST_MODE          (1ULL << 4)
/**< Event device is capable of operating in burst mode for enqueue (forward,
 * release) and dequeue operations.
 *
 * If this capability is not set, the application
 * can still use rte_event_dequeue_burst() and rte_event_enqueue_burst(), but
 * the PMD accepts or returns only one event at a time.
 *
 * @see rte_event_dequeue_burst()
 * @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE    (1ULL << 5)
/**< Event device ports support disabling the implicit release feature, in
 * which the port will release all unreleased events in its dequeue operation.
 *
 * If this capability is set and the port is configured with implicit release
 * disabled, the application is responsible for explicitly releasing events
 * using either the @ref RTE_EVENT_OP_FORWARD or the @ref RTE_EVENT_OP_RELEASE event
 * enqueue operations.
 *
 * @see rte_event_dequeue_burst()
 * @see rte_event_enqueue_burst()
 */

#define RTE_EVENT_DEV_CAP_NONSEQ_MODE         (1ULL << 6)
/**< Event device is capable of operating in non-sequential mode.
 *
 * The path of the event need not be sequential. The application can change
 * the path of the event at runtime, and events may be sent to queues in any order.
 *
 * If the flag is not set, then each event will follow a path from queue 0
 * to queue 1 to queue 2, etc.
 * The eventdev will return an error when the application enqueues an event for a
 * qid which is not the next in the sequence.
 */

#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK   (1ULL << 7)
/**< Event device is capable of reconfiguring the queue/port link at runtime.
 *
 * If the flag is not set, the eventdev queue/port link can only be
 * configured during initialization, or by stopping the device and
 * then later restarting it after reconfiguration.
 *
 * @see rte_event_port_link()
 * @see rte_event_port_unlink()
 */

#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
/**< Event device is capable of setting up links between multiple queues and a single port.
 *
 * If the flag is not set, each port may only be linked to a single queue, and
 * so can only receive events from that queue.
 * However, each queue may be linked to multiple ports.
 *
 * @see rte_event_port_link()
 */

#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
/**< Event device preserves the flow ID from the enqueued event to the dequeued event.
 *
 * If this flag is not set,
 * the content of the flow-id field in dequeued events is implementation dependent.
 *
 * @see rte_event_dequeue_burst()
 */

#define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)
/**< Event device *does not* require calls to rte_event_maintain().
 *
 * An event device that does not set this flag requires calls to
 * rte_event_maintain() during periods when neither
 * rte_event_dequeue_burst() nor rte_event_enqueue_burst() are called
 * on a port. This will allow the event device to perform internal
 * processing, such as flushing buffered events, returning credits to a
 * global pool, or processing signaling related to load balancing.
 *
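 * A sketch of the maintenance call on an otherwise idle port (assuming
 * *info* holds the result of rte_event_dev_info_get(); an *op* value of 0
 * requests the default maintenance operation):
 *
 * \code{.c}
 *	if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_MAINTENANCE_FREE))
 *		rte_event_maintain(dev_id, port_id, 0);
 * \endcode
 *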
 * @see rte_event_maintain()
 */

#define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11)
/**< Event device is capable of changing the queue attributes at runtime, i.e.
 * after the rte_event_queue_setup() or rte_event_dev_start() call sequence.
 *
 * If this flag is not set, event queue attributes can only be configured during
 * rte_event_queue_setup().
 *
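 * A sketch of raising a queue's priority at runtime (assuming the device
 * reports this capability):
 *
 * \code{.c}
 *	rte_event_queue_attr_set(dev_id, queue_id,
 *				 RTE_EVENT_QUEUE_ATTR_PRIORITY,
 *				 RTE_EVENT_DEV_PRIORITY_HIGHEST);
 * \endcode
 *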
 * @see rte_event_queue_setup()
 */

#define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12)
/**< Event device is capable of supporting multiple link profiles per event port.
 *
 * When set, the value of `rte_event_dev_info::max_profiles_per_port` is greater
 * than one, and multiple profiles may be configured and then switched at runtime.
 * If not set, only a single profile may be configured, which may itself be
 * runtime adjustable (if @ref RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK is set).
 *
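 * A sketch of preparing two single-queue link profiles and switching between
 * them at runtime (assuming NULL priorities default to normal priority):
 *
 * \code{.c}
 *	uint8_t q0 = 0, q1 = 1;
 *
 *	rte_event_port_profile_links_set(dev_id, port_id, &q0, NULL, 1, 0);
 *	rte_event_port_profile_links_set(dev_id, port_id, &q1, NULL, 1, 1);
 *
 *	rte_event_port_profile_switch(dev_id, port_id, 1);
 * \endcode
 *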
 * @see rte_event_port_profile_links_set()
 * @see rte_event_port_profile_links_get()
 * @see rte_event_port_profile_switch()
 * @see RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK
 */

#define RTE_EVENT_DEV_CAP_ATOMIC  (1ULL << 13)
/**< Event device is capable of atomic scheduling.
 * When this flag is set, the application can configure queues with scheduling type
 * atomic on this event device.
 *
 * @see RTE_SCHED_TYPE_ATOMIC
 */

#define RTE_EVENT_DEV_CAP_ORDERED  (1ULL << 14)
/**< Event device is capable of ordered scheduling.
 * When this flag is set, the application can configure queues with scheduling type
 * ordered on this event device.
 *
 * @see RTE_SCHED_TYPE_ORDERED
 */

#define RTE_EVENT_DEV_CAP_PARALLEL  (1ULL << 15)
/**< Event device is capable of parallel scheduling.
 * When this flag is set, the application can configure queues with scheduling type
 * parallel on this event device.
 *
 * @see RTE_SCHED_TYPE_PARALLEL
 */

/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST   0
/**< Highest priority level for events and queues.
 *
 * @see rte_event_queue_setup()
 * @see rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */
#define RTE_EVENT_DEV_PRIORITY_NORMAL    128
/**< Normal priority level for events and queues.
 *
 * @see rte_event_queue_setup()
 * @see rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */
#define RTE_EVENT_DEV_PRIORITY_LOWEST    255
/**< Lowest priority level for events and queues.
 *
 * @see rte_event_queue_setup()
 * @see rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */

/* Event queue scheduling weights */
#define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255
/**< Highest weight of an event queue.
 *
 * @see rte_event_queue_attr_get()
 * @see rte_event_queue_attr_set()
 */
#define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0
/**< Lowest weight of an event queue.
 *
 * @see rte_event_queue_attr_get()
 * @see rte_event_queue_attr_set()
 */

/* Event queue scheduling affinity */
#define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255
/**< Highest scheduling affinity of an event queue.
 *
 * @see rte_event_queue_attr_get()
 * @see rte_event_queue_attr_set()
 */
#define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0
/**< Lowest scheduling affinity of an event queue.
 *
 * @see rte_event_queue_attr_get()
 * @see rte_event_queue_attr_set()
 */

/**
 * Get the total number of event devices.
 *
 * @return
 *   The total number of usable event devices.
 */
uint8_t
rte_event_dev_count(void);

/**
 * Get the device identifier for the named event device.
 *
 * @param name
 *   Event device name to select the event device identifier.
 *
 * @return
 *   Event device identifier (dev_id >= 0) on success.
 *   Negative error code on failure:
 *   - -EINVAL - input name parameter is invalid.
 *   - -ENODEV - no event device found with that name.
 */
int
rte_event_dev_get_dev_id(const char *name);

/**
 * Return the NUMA socket to which a device is connected.
 *
 * @param dev_id
 *   The identifier of the device.
 * @return
 *   The NUMA socket id to which the device is connected, or
 *   a default of zero if the socket could not be determined.
 *   -EINVAL on error, if the given dev_id value does not
 *   correspond to any event device.
 */
int
rte_event_dev_socket_id(uint8_t dev_id);

/**
 * Event device information
 */
struct rte_event_dev_info {
	const char *driver_name;	/**< Event driver name. */
	struct rte_device *dev;	/**< Device information. */
	uint32_t min_dequeue_timeout_ns;
	/**< Minimum global dequeue timeout(ns) supported by this device. */
	uint32_t max_dequeue_timeout_ns;
	/**< Maximum global dequeue timeout(ns) supported by this device. */
	uint32_t dequeue_timeout_ns;
	/**< Configured global dequeue timeout(ns) for this device. */
	uint8_t max_event_queues;
	/**< Maximum event queues supported by this device.
	 *
	 * This count excludes any queues covered by @ref max_single_link_event_port_queue_pairs.
	 */
	uint32_t max_event_queue_flows;
	/**< Maximum number of flows within an event queue supported by this device. */
	uint8_t max_event_queue_priority_levels;
	/**< Maximum number of event queue priority levels supported by this device.
	 *
	 * Valid when the device has @ref RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
	 *
	 * The implementation shall normalize priority values specified between
	 * @ref RTE_EVENT_DEV_PRIORITY_HIGHEST and @ref RTE_EVENT_DEV_PRIORITY_LOWEST
	 * to map them internally to this range of priorities.
	 * [For devices supporting a power-of-2 number of priority levels, this
	 * normalization will be done via a right-shift operation, so only the top
	 * log2(max_levels) bits will be used by the event device.]
	 *
	 * @see rte_event_queue_conf.priority
	 */
	uint8_t max_event_priority_levels;
	/**< Maximum number of event priority levels supported by this device.
	 *
	 * Valid when the device has @ref RTE_EVENT_DEV_CAP_EVENT_QOS capability.
	 *
	 * The implementation shall normalize priority values specified between
	 * @ref RTE_EVENT_DEV_PRIORITY_HIGHEST and @ref RTE_EVENT_DEV_PRIORITY_LOWEST
	 * to map them internally to this range of priorities.
	 * [For devices supporting a power-of-2 number of priority levels, this
	 * normalization will be done via a right-shift operation, so only the top
	 * log2(max_levels) bits will be used by the event device.]
	 *
	 * @see rte_event.priority
	 */
	uint8_t max_event_ports;
	/**< Maximum number of event ports supported by this device.
	 *
	 * This count excludes any ports covered by @ref max_single_link_event_port_queue_pairs.
	 */
	uint8_t max_event_port_dequeue_depth;
	/**< Maximum number of events that can be dequeued at a time from an event port
	 * on this device.
	 *
	 * A device that does not support burst dequeue
	 * (@ref RTE_EVENT_DEV_CAP_BURST_MODE) will set this to 1.
	 */
	uint32_t max_event_port_enqueue_depth;
	/**< Maximum number of events that can be enqueued at a time to an event port
	 * on this device.
	 *
	 * A device that does not support burst enqueue
	 * (@ref RTE_EVENT_DEV_CAP_BURST_MODE) will set this to 1.
	 */
	uint8_t max_event_port_links;
	/**< Maximum number of queues that can be linked to a single event port on this device.
	 */
	int32_t max_num_events;
	/**< A *closed system* event dev has a limit on the number of events it
	 * can manage at a time.
	 * Once the number of events tracked by an eventdev exceeds this number,
	 * any enqueues of NEW events will fail.
	 * An *open system* event dev does not have a limit and will specify this as -1.
	 */
	uint32_t event_dev_cap;
	/**< Event device capabilities flags (RTE_EVENT_DEV_CAP_*). */
	uint8_t max_single_link_event_port_queue_pairs;
	/**< Maximum number of event ports and queues, supported by this device,
	 * that are optimized for (and only capable of) single-link configurations.
	 * These ports and queues are not accounted for in @ref max_event_ports
	 * or @ref max_event_queues.
	 */
	uint8_t max_profiles_per_port;
	/**< Maximum number of event queue link profiles per event port.
	 * A device that doesn't support multiple profiles will set this as 1.
	 */
};

/**
 * Retrieve details of an event device's capabilities and configuration limits.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param[out] dev_info
 *   A pointer to a structure of type *rte_event_dev_info* to be filled with the
 *   information about the device's capabilities.
 *
 * @return
 *   - 0: Success, information about the event device is present in dev_info.
 *   - <0: Failure, error code returned by the function.
 *     - -EINVAL - invalid input parameters, e.g. incorrect device id.
 *     - -ENOTSUP - device does not support returning capabilities information.
 */
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);

/**
 * The count of ports.
 */
#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
/**
 * The count of queues.
 */
#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
/**
 * The status of the device, zero for stopped, non-zero for started.
 */
#define RTE_EVENT_DEV_ATTR_STARTED 2

/**
 * Get an attribute from a device.
 *
 * @param dev_id Eventdev id
 * @param attr_id The attribute ID to retrieve
 * @param[out] attr_value A pointer that will be filled in with the attribute
 *             value if successful.
 *
 * @return
 *   - 0: Successfully retrieved attribute value
 *   - -EINVAL: Invalid device or *attr_id* provided, or *attr_value* is NULL
 */
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value);

/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
/**< Override the global *dequeue_timeout_ns* and use a per-dequeue timeout in ns.
 *  @see rte_event_dequeue_timeout_ticks(), rte_event_dequeue_burst()
 */

/** Event device configuration structure */
struct rte_event_dev_config {
	uint32_t dequeue_timeout_ns;
	/**< rte_event_dequeue_burst() timeout on this device.
	 * This value should be in the range of *min_dequeue_timeout_ns* and
	 * *max_dequeue_timeout_ns*, as provided previously by
	 * rte_event_dev_info_get().
	 * The value 0 is allowed, in which case the default dequeue timeout is used.
	 * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
	 */
	int32_t nb_events_limit;
	/**< In a *closed system* this field is the limit on maximum number of
	 * events that can be inflight in the eventdev at a given time. The
	 * limit is required to ensure that the finite space in a closed system
	 * is not overwhelmed. The value cannot exceed the *max_num_events*
	 * as provided by rte_event_dev_info_get().
	 * This value should be set to -1 for *open systems*.
	 */
	uint8_t nb_event_queues;
	/**< Number of event queues to configure on this device.
	 * This value cannot exceed the *max_event_queues* provided previously
	 * by rte_event_dev_info_get().
	 */
	uint8_t nb_event_ports;
	/**< Number of event ports to configure on this device.
	 * This value cannot exceed the *max_event_ports* provided previously
	 * by rte_event_dev_info_get().
	 */
	uint32_t nb_event_queue_flows;
	/**< Number of flows for any event queue on this device.
	 * This value cannot exceed the *max_event_queue_flows* provided
	 * previously by rte_event_dev_info_get().
	 */
	uint32_t nb_event_port_dequeue_depth;
	/**< Maximum number of events that can be dequeued at a time from an
	 * event port on this device.
	 * This value cannot exceed the *max_event_port_dequeue_depth*
	 * provided previously by rte_event_dev_info_get().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 * @see rte_event_port_setup()
	 */
	uint32_t nb_event_port_enqueue_depth;
	/**< Maximum number of events that can be enqueued at a time to an
	 * event port on this device.
	 * This value cannot exceed the *max_event_port_enqueue_depth*
	 * provided previously by rte_event_dev_info_get().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 * @see rte_event_port_setup()
	 */
	uint32_t event_dev_cfg;
	/**< Event device config flags(RTE_EVENT_DEV_CFG_)*/
	uint8_t nb_single_link_event_port_queues;
	/**< Number of event ports and queues that will be singly-linked to
	 * each other. These are a subset of the overall event ports and
	 * queues; this value cannot exceed *nb_event_ports* or
	 * *nb_event_queues*. If the device has ports and queues that are
	 * optimized for single-link usage, this field is a hint for how many
	 * to allocate; otherwise, regular event ports and queues can be used.
	 */
};

/**
 * Configure an event device.
 *
 * This function must be invoked before any other function in the
 * API. It can also be re-invoked when a device is in the
 * stopped state.
 *
 * The caller may use rte_event_dev_info_get() to get the capabilities and
 * resource limits of this event device.
 *
 * @param dev_id
 *   The identifier of the device to configure.
 * @param dev_conf
 *   The event device configuration structure.
 *
 * @return
 *   - 0: Success, device configured.
 *   - <0: Error code returned by the driver configuration function.
 */
int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf);

/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES          (1ULL << 0)
/**< Allow enqueuing events with the ATOMIC, ORDERED, and PARALLEL schedule types.
 *
 * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
 * @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK        (1ULL << 1)
/**< This event queue links only to a single event port.
 *
 *  @see rte_event_port_setup(), rte_event_port_link()
 */

/** Event queue configuration structure */
struct rte_event_queue_conf {
	uint32_t nb_atomic_flows;
	/**< The maximum number of active flows this queue can track at any
	 * given time. If the queue is configured for atomic scheduling (by
	 * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg
	 * or the RTE_SCHED_TYPE_ATOMIC flag to schedule_type), then the
	 * value must be in the range of [1, nb_event_queue_flows], which was
	 * previously provided in rte_event_dev_configure().
	 */
	uint32_t nb_atomic_order_sequences;
	/**< The maximum number of outstanding events waiting to be
	 * reordered by this queue. In other words, the number of entries in
	 * this queue's reorder buffer. When the number of events in the
	 * reorder buffer reaches *nb_atomic_order_sequences*, then the
	 * scheduler cannot schedule the events from this queue and an invalid
	 * event will be returned from dequeue until one or more entries are
	 * freed up/released.
	 * If the queue is configured for ordered scheduling (by applying the
	 * RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg or the
	 * RTE_SCHED_TYPE_ORDERED flag to schedule_type), then the value must
	 * be in the range of [1, nb_event_queue_flows], which was
	 * previously supplied to rte_event_dev_configure().
	 */
	uint32_t event_queue_cfg;
	/**< Queue cfg flags(EVENT_QUEUE_CFG_) */
	uint8_t schedule_type;
	/**< Queue schedule type(RTE_SCHED_TYPE_*).
	 * Valid when the RTE_EVENT_QUEUE_CFG_ALL_TYPES bit is not set in
	 * event_queue_cfg.
	 */
	uint8_t priority;
	/**< Priority for this event queue relative to other event queues.
	 * The requested priority should be in the range of
	 * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
	 * The implementation shall normalize the requested priority to
	 * event device supported priority value.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
	 */
	uint8_t weight;
	/**< Weight of the event queue relative to other event queues.
	 * The requested weight should be in the range of
	 * [RTE_EVENT_QUEUE_WEIGHT_HIGHEST, RTE_EVENT_QUEUE_WEIGHT_LOWEST].
	 * The implementation shall normalize the requested weight to event
	 * device supported weight value.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
	 */
	uint8_t affinity;
	/**< Affinity of the event queue relative to other event queues.
	 * The requested affinity should be in the range of
	 * [RTE_EVENT_QUEUE_AFFINITY_HIGHEST, RTE_EVENT_QUEUE_AFFINITY_LOWEST].
	 * The implementation shall normalize the requested affinity to event
	 * device supported affinity value.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
	 */
};

/**
 * Retrieve the default configuration information of an event queue designated
 * by its *queue_id* from the event driver for an event device.
 *
 * This function is intended to be used in conjunction with rte_event_queue_setup()
 * where the caller needs to set up the queue by overriding a few default values.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the event queue to get the configuration information.
 *   The value must be in the range [0, nb_event_queues - 1]
 *   previously supplied to rte_event_dev_configure().
 * @param[out] queue_conf
 *   The pointer to the default event queue configuration data.
 * @return
 *   - 0: Success, driver updates the default event queue configuration data.
 *   - <0: Error code returned by the driver info get function.
 *
 * @see rte_event_queue_setup()
 */
int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf);

/**
 * Allocate and set up an event queue for an event device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the event queue to setup. The value must be in the range
 *   [0, nb_event_queues - 1] previously supplied to rte_event_dev_configure().
 * @param queue_conf
 *   The pointer to the configuration data to be used for the event queue.
 *   A NULL value is allowed, in which case the default configuration is used.
 *
 * @see rte_event_queue_default_conf_get()
 *
 * @return
 *   - 0: Success, event queue correctly set up.
 *   - <0: event queue configuration failed.
 */
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf);

/**
 * The priority of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
/**
 * The number of atomic flows configured for the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
/**
 * The number of atomic order sequences configured for the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
/**
 * The cfg flags for the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
/**
 * The schedule type of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
/**
 * The weight of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_WEIGHT 5
/**
 * Affinity of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_AFFINITY 6

/**
 * Get an attribute from a queue.
 *
 * @param dev_id
 *   Eventdev id
 * @param queue_id
 *   Eventdev queue id
 * @param attr_id
 *   The attribute ID to retrieve
 * @param[out] attr_value
 *   A pointer that will be filled in with the attribute value if successful
 *
 * @return
 *   - 0: Successfully returned value
 *   - -EINVAL: invalid device, queue or attr_id provided, or attr_value was
 *		NULL
 *   - -EOVERFLOW: returned when attr_id is set to
 *   RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and event_queue_cfg is set to
 *   RTE_EVENT_QUEUE_CFG_ALL_TYPES
 */
int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value);

/**
 * Set an event queue attribute.
 *
 * @param dev_id
 *   Eventdev id
 * @param queue_id
 *   Eventdev queue id
 * @param attr_id
 *   The attribute ID to set
 * @param attr_value
 *   The attribute value to set
 *
 * @return
 *   - 0: Successfully set attribute.
 *   - -EINVAL: invalid device, queue or attr_id.
 *   - -ENOTSUP: device does not support setting the event attribute.
 *   - <0: failed to set event queue attribute
 */
int
rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			 uint64_t attr_value);

/* Event port specific APIs */

/* Event port configuration bitmap flags */
#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)
/**< Configure the port not to release outstanding events in
 * rte_event_dequeue_burst(). If set, all events received through
 * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
 * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
 * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
 */
#define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)
/**< This event port links only to a single event queue.
 *
 *  @see rte_event_port_setup(), rte_event_port_link()
 */
#define RTE_EVENT_PORT_CFG_HINT_PRODUCER       (1ULL << 2)
/**< Hint that this event port will primarily enqueue events to the system.
 * A PMD can optimize its internal workings by assuming that this port is
 * primarily going to enqueue NEW events.
 *
 * Note that this flag is only a hint, so PMDs must operate under the
 * assumption that any port can enqueue an event with any type of op.
 *
 *  @see rte_event_port_setup()
 */
#define RTE_EVENT_PORT_CFG_HINT_CONSUMER       (1ULL << 3)
/**< Hint that this event port will primarily dequeue events from the system.
 * A PMD can optimize its internal workings by assuming that this port is
 * primarily going to consume events, and not enqueue FORWARD or RELEASE
 * events.
 *
 * Note that this flag is only a hint, so PMDs must operate under the
 * assumption that any port can enqueue an event with any type of op.
 *
 *  @see rte_event_port_setup()
 */
#define RTE_EVENT_PORT_CFG_HINT_WORKER         (1ULL << 4)
/**< Hint that this event port will primarily pass existing events through.
 * A PMD can optimize its internal workings by assuming that this port is
 * primarily going to FORWARD events, and not enqueue NEW or RELEASE events
 * often.
 *
 * Note that this flag is only a hint, so PMDs must operate under the
 * assumption that any port can enqueue an event with any type of op.
 *
 *  @see rte_event_port_setup()
 */

/** Event port configuration structure */
struct rte_event_port_conf {
	int32_t new_event_threshold;
	/**< A backpressure threshold for new event enqueues on this port.
	 * Use for *closed system* event devices where event capacity is limited,
	 * and cannot exceed the capacity of the event device.
	 * Configuring ports with different thresholds can make higher priority
	 * traffic less likely to be backpressured.
	 * For example, a port used to inject NIC Rx packets into the event dev
	 * can have a lower threshold so as not to overwhelm the device,
	 * while ports used for worker pools can have a higher threshold.
	 * This value cannot exceed the *nb_events_limit*
	 * which was previously supplied to rte_event_dev_configure().
	 * This should be set to '-1' for *open systems*.
	 */
	uint16_t dequeue_depth;
	/**< Configure the number of bulk dequeues for this event port.
	 * This value cannot exceed the *nb_event_port_dequeue_depth*
	 * which was previously supplied to rte_event_dev_configure().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 */
	uint16_t enqueue_depth;
	/**< Configure the number of bulk enqueues for this event port.
	 * This value cannot exceed the *nb_event_port_enqueue_depth*
	 * which was previously supplied to rte_event_dev_configure().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 */
	uint32_t event_port_cfg; /**< Port cfg flags(EVENT_PORT_CFG_) */
};

/**
 * Retrieve the default configuration information of an event port designated
 * by its *port_id* from the event driver for an event device.
 *
 * This function is intended to be used in conjunction with rte_event_port_setup()
 * where the caller needs to set up the port by overriding a few default values.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The index of the event port to get the configuration information.
 *   The value must be in the range [0, nb_event_ports - 1]
 *   previously supplied to rte_event_dev_configure().
 * @param[out] port_conf
 *   The pointer to the default event port configuration data.
 * @return
 *   - 0: Success, driver updates the default event port configuration data.
 *   - <0: Error code returned by the driver info get function.
 *
 * @see rte_event_port_setup()
 */
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				struct rte_event_port_conf *port_conf);

/**
 * Allocate and set up an event port for an event device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The index of the event port to setup. The value must be in the range
 *   [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
 * @param port_conf
 *   The pointer to the configuration data to be used for the event port.
 *   A NULL value is allowed, in which case the default configuration is used.
 *
 * @see rte_event_port_default_conf_get()
 *
 * @return
 *   - 0: Success, event port correctly set up.
 *   - <0: Port configuration failed.
 *   - (-EDQUOT) Quota exceeded (the application tried to link a queue configured
 *   with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port).
 */
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf);

typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
					  struct rte_event event, void *arg);
/**< Callback function prototype that can be passed during
 * rte_event_port_release(), invoked once per released event.
 */

/**
 * Quiesce any core specific resources consumed by the event port.
 *
 * Event ports are generally coupled with lcores, and a given hardware
 * implementation might require the PMD to store port specific data in the
 * lcore.
 * When the application decides to migrate the event port to another lcore
 * or tear down the current lcore, it may call `rte_event_port_quiesce`
 * to make sure that all the data associated with the event port are released
 * from the lcore; this might also include any prefetched events.
 * While releasing the event port from the lcore, this function calls the
 * user-provided flush callback once per event.
 *
 * @note Invocation of this API does not affect the existing port configuration.
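 *
 * A sketch of releasing a port's lcore resources before lcore teardown,
 * where *port_flush_cb* is a hypothetical application callback of type
 * rte_eventdev_port_flush_t:
 *
 * \code{.c}
 *	rte_event_port_quiesce(dev_id, port_id, port_flush_cb, NULL);
 * \endcode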
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The index of the event port to quiesce. The value must be in the range
 *   [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
 * @param release_cb
 *   Callback function invoked once per flushed event.
 * @param args
 *   Argument supplied to callback.
 */
void
rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
		       rte_eventdev_port_flush_t release_cb, void *args);

/**
 * The queue depth of the port on the enqueue side.
 */
#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
/**
 * The queue depth of the port on the dequeue side.
 */
#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
/**
 * The new event threshold of the port.
 */
#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
/**
 * The implicit release disable attribute of the port.
 */
#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3

/**
 * Get an attribute from a port.
 *
 * @param dev_id
 *   Eventdev id
 * @param port_id
 *   Eventdev port id
 * @param attr_id
 *   The attribute ID to retrieve
 * @param[out] attr_value
 *   A pointer that will be filled in with the attribute value if successful
 *
 * @return
 *   - 0: Successfully returned value
 *   - (-EINVAL) Invalid device, port or attr_id, or attr_value was NULL
 */
int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value);

/**
 * Start an event device.
 *
 * The device start step is the last one and consists of setting the event
 * queues to start accepting events and scheduling them to event ports.
 *
 * On success, all basic functions exported by the API (event enqueue,
 * event dequeue and so on) can be invoked.
 *
 * @param dev_id
 *   Event device identifier
 * @return
 *   - 0: Success, device started.
 *   - -ESTALE: Not all ports of the device are configured.
 *   - -ENOLINK: Not all queues are linked, which could lead to deadlock.
 */
int
rte_event_dev_start(uint8_t dev_id);

/**
 * Stop an event device.
 *
 * This function causes all queued events to be drained, including those
 * residing in event ports. While draining events out of the device, this
 * function calls the user-provided flush callback (if one was registered) once
 * per event.
 *
 * The device can be restarted with a call to rte_event_dev_start(). Threads
 * that continue to enqueue/dequeue while the device is stopped, or being
 * stopped, will result in undefined behavior. This includes event adapters,
 * which must be stopped prior to stopping the eventdev.
 *
 * @param dev_id
 *   Event device identifier.
 *
 * @see rte_event_dev_stop_flush_callback_register()
 */
void
rte_event_dev_stop(uint8_t dev_id);

typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
					  struct rte_event event, void *arg);
/**< Callback function called during rte_event_dev_stop(), invoked once per
 * flushed event.
 */

/**
 * Registers a callback function to be invoked during rte_event_dev_stop() for
 * each flushed event. This function can be used to properly dispose of queued
 * events, for example events containing memory pointers.
 *
 * The callback function is only registered for the calling process. The
 * callback function must be registered in every process that can call
 * rte_event_dev_stop().
 *
 * To unregister a callback, call this function with a NULL callback pointer.
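 *
 * A sketch of a flush callback that frees packet events (assuming all queued
 * events carry mbufs):
 *
 * \code{.c}
 *	static void
 *	flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
 *	{
 *		rte_pktmbuf_free(ev.mbuf);
 *	}
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, flush_cb, NULL);
 * \endcode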
 *
 * @param dev_id
 *   The identifier of the device.
 * @param callback
 *   Callback function invoked once per flushed event.
 * @param userdata
 *   Argument supplied to callback.
 *
 * @return
 *  - 0 on success.
 *  - -EINVAL if *dev_id* is invalid
 *
 * @see rte_event_dev_stop()
 */
int rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
					       rte_eventdev_stop_flush_t callback, void *userdata);

/**
 * Close an event device. The device cannot be restarted!
 *
 * @param dev_id
 *   Event device identifier
 *
 * @return
 *  - 0 on successfully closing device
 *  - <0 on failure to close device
 *  - (-EAGAIN) if device is busy
 */
int
rte_event_dev_close(uint8_t dev_id);

/**
 * Event vector structure.
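 *
 * A sketch of walking an mbuf vector event (assuming the dequeued event *ev*
 * has a vector event type and its elements are mbufs; process_mbuf() is a
 * hypothetical application function):
 *
 * \code{.c}
 *	struct rte_event_vector *vec = ev.vec;
 *	uint16_t i;
 *
 *	for (i = 0; i < vec->nb_elem; i++)
 *		process_mbuf(vec->mbufs[vec->elem_offset + i]);
 * \endcode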
 */
struct rte_event_vector {
	uint16_t nb_elem;
	/**< Number of elements valid in this event vector. */
	uint16_t elem_offset : 12;
	/**< Offset into the vector array where valid elements start from. */
	uint16_t rsvd : 3;
	/**< Reserved for future use */
	uint16_t attr_valid : 1;
	/**< Indicates that the below union attributes have valid information.
	 */
	union {
		/* Used by Rx/Tx adapter.
		 * Indicates that all the elements in this vector belong to the
		 * same port and queue pair when originating from the Rx adapter;
		 * valid only when the event type is ETHDEV_VECTOR or
		 * ETH_RX_ADAPTER_VECTOR.
		 * Can also be used to indicate to the Tx adapter the destination
		 * port and queue of the mbufs in the vector.
		 */
		struct {
			uint16_t port;
			/* Ethernet device port id. */
			uint16_t queue;
			/* Ethernet device queue id. */
		};
	};
	/**< Union to hold common attributes of the vector array. */
	uint64_t impl_opaque;

/* empty structures do not have zero size in C++ leading to compilation errors
 * with clang about structure having different sizes in C and C++.
 * Since these are all zero-sized arrays, we can omit the "union" wrapper for
 * C++ builds, removing the warning.
 */
#ifndef __cplusplus
	/**< Implementation specific opaque value.
	 * An implementation may use this field to hold an implementation specific
	 * value to share between the dequeue and enqueue operations.
	 * The application should not modify this field.
	 */
	union {
#endif
		struct rte_mbuf *mbufs[0];
		void *ptrs[0];
		uint64_t u64s[0];
#ifndef __cplusplus
	} __rte_aligned(16);
#endif
	/**< Start of the vector array union. Depending upon the event type the
	 * vector array can be an array of mbufs or pointers or opaque u64
	 * values.
	 */
} __rte_aligned(16);

1304 /* Scheduler type definitions */
1305 #define RTE_SCHED_TYPE_ORDERED          0
1306 /**< Ordered scheduling
1307  *
1308  * Events from an ordered flow of an event queue can be scheduled to multiple
1309  * ports for concurrent processing while maintaining the original event order.
1310  * This scheme enables the user to achieve high single flow throughput by
1311  * avoiding SW synchronization for ordering between ports which bound to cores.
1312  *
1313  * The source flow ordering from an event queue is maintained when events are
1314  * enqueued to their destination queue within the same ordered flow context.
1315  * An event port holds the context until the application calls
1316  * rte_event_dequeue_burst() from the same port, which implicitly releases
1317  * the context.
1318  * The user may allow the scheduler to release the context earlier than that
1319  * by invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE operation.
1320  *
1321  * Events from the source queue appear in their original order when dequeued
1322  * from a destination queue.
1323  * Event ordering is based on the received event(s), but other
1324  * (newly allocated or stored) events are also ordered when enqueued within the
1325  * same ordered context. Events not enqueued within the context (e.g. released
1326  * or stored) are considered missing from reordering and are skipped at this
1327  * time (but can be ordered again within another context).
1328  *
1329  * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
1330  */
1331 
1332 #define RTE_SCHED_TYPE_ATOMIC           1
1333 /**< Atomic scheduling
1334  *
1335  * Events from an atomic flow of an event queue can be scheduled only to a
1336  * single port at a time. The port is guaranteed to have exclusive (atomic)
1337  * access to the associated flow context, which enables the user to avoid SW
1338  * synchronization. Atomic flows also help to maintain event ordering
1339  * since only one port at a time can process events from a flow of an
1340  * event queue.
1341  *
1342  * The atomic queue synchronization context is dedicated to the port until
1343  * the application calls rte_event_dequeue_burst() from the same port,
1344  * which implicitly releases the context. The user may allow the scheduler to
1345  * release the context earlier than that by invoking rte_event_enqueue_burst()
1346  * with RTE_EVENT_OP_RELEASE operation.
1347  *
1348  * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
1349  */
1350 
1351 #define RTE_SCHED_TYPE_PARALLEL         2
1352 /**< Parallel scheduling
1353  *
1354  * The scheduler performs priority scheduling, load balancing, and similar
1355  * functions, but does not provide additional event synchronization or ordering.
1356  * It is free to schedule events from a single parallel flow of an event queue
1357  * to multiple event ports for concurrent processing.
1358  * The application is responsible for flow context synchronization and
1359  * event ordering (SW synchronization).
1360  *
1361  * @see rte_event_queue_setup(), rte_event_dequeue_burst()
1362  */
1363 
1364 /* Event types to classify the event source */
1365 #define RTE_EVENT_TYPE_ETHDEV           0x0
1366 /**< The event generated from ethdev subsystem */
1367 #define RTE_EVENT_TYPE_CRYPTODEV        0x1
1368 /**< The event generated from cryptodev subsystem */
1369 #define RTE_EVENT_TYPE_TIMER		0x2
1370 /**< The event generated from event timer adapter */
1371 #define RTE_EVENT_TYPE_CPU              0x3
1372 /**< The event generated from cpu for pipelining.
1373  * Application may use *sub_event_type* to further classify the event
1374  */
1375 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER   0x4
1376 /**< The event generated from event eth Rx adapter */
1377 #define RTE_EVENT_TYPE_DMADEV           0x5
1378 /**< The event generated from dma subsystem */
1379 #define RTE_EVENT_TYPE_VECTOR           0x8
1380 /**< Indicates that event is a vector.
1381  * All vector event types are a logical OR of RTE_EVENT_TYPE_VECTOR and one
1382  * of the base event types. This simplifies the pipeline design as one can
1383  * split processing between vector events and normal events across event types.
1384  * Example:
1385  *	if (ev.event_type & RTE_EVENT_TYPE_VECTOR) {
1386  *		// Classify and handle vector event.
1387  *	} else {
1388  *		// Classify and handle event.
1389  *	}
1390  */
1391 #define RTE_EVENT_TYPE_ETHDEV_VECTOR                                           \
1392 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
1393 /**< The event vector generated from ethdev subsystem */
1394 #define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
1395 /**< The event vector generated from cpu for pipelining. */
1396 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR                                   \
1397 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
1398 /**< The event vector generated from eth Rx adapter. */
1399 #define RTE_EVENT_TYPE_CRYPTODEV_VECTOR                                        \
1400 	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
1401 /**< The event vector generated from cryptodev adapter. */
1402 
1403 #define RTE_EVENT_TYPE_MAX              0x10
1404 /**< Maximum number of event types */
1405 
1406 /* Event enqueue operations */
1407 #define RTE_EVENT_OP_NEW                0
1408 /**< The event producers use this operation to inject a new event into the
1409  * event device.
1410  */
1411 #define RTE_EVENT_OP_FORWARD            1
1412 /**< The CPU uses this operation to forward the event to a different event
1413  * queue, or to change it to a new application-specific flow or schedule type,
1414  * to enable pipelining.
1415  *
1416  * This operation must only be enqueued to the same port that the
1417  * event to be forwarded was dequeued from.
1418  */
1419 #define RTE_EVENT_OP_RELEASE            2
1420 /**< Release the flow context associated with the schedule type.
1421  *
1422  * If the current flow's schedule type is *RTE_SCHED_TYPE_ATOMIC*,
1423  * then this operation hints the scheduler that the user has completed critical
1424  * section processing in the current atomic context.
1425  * The scheduler is now allowed to schedule events from the same flow from
1426  * an event queue to another port. However, the context may still be held
1427  * until the next rte_event_dequeue_burst() call; this call allows but does not
1428  * force the scheduler to release the context early.
1429  *
1430  * Early atomic context release may increase parallelism and thus system
1431  * performance, but the user needs to design carefully the split into critical
1432  * vs non-critical sections.
1433  *
1434  * If the current flow's schedule type is *RTE_SCHED_TYPE_ORDERED*,
1435  * then this operation hints the scheduler that the user has done all that is
1436  * needed to maintain event order in the current ordered context.
1437  * The scheduler is allowed to release the ordered context of this port and
1438  * avoid reordering any following enqueues.
1439  *
1440  * Early ordered context release may increase parallelism and thus system
1441  * performance.
1442  *
1443  * If the current flow's schedule type is *RTE_SCHED_TYPE_PARALLEL*,
1444  * or no scheduling context is held, then this operation may be a no-op,
1445  * depending on the implementation.
1446  *
1447  * This operation must only be enqueued to the same port that the
1448  * event to be released was dequeued from.
1449  */
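
/*
 * A minimal sketch of a worker using the operations above; process() and
 * next_queue_id are illustrative, only the RTE_EVENT_OP_* semantics are
 * taken from this header.
 *
 * @code{.c}
 * struct rte_event ev;
 *
 * if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, timeout) != 0) {
 * 	if (process(&ev)) {
 * 		// Forward to the next pipeline stage from the same port.
 * 		ev.op = RTE_EVENT_OP_FORWARD;
 * 		ev.queue_id = next_queue_id;
 * 		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 * 	} else {
 * 		// Done with the event: release the held context early.
 * 		ev.op = RTE_EVENT_OP_RELEASE;
 * 		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 * 	}
 * }
 * @endcode
 */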
1450 
1451 /**
1452  * The generic *rte_event* structure to hold the event attributes
1453  * for dequeue and enqueue operation
1454  */
1455 struct rte_event {
1456 	/** WORD0 */
1457 	union {
1458 		uint64_t event;
1459 		/** Event attributes for dequeue or enqueue operation */
1460 		struct {
1461 			uint32_t flow_id:20;
1462 			/**< Targeted flow identifier for the enqueue and
1463 			 * dequeue operation.
1464 			 * The value must be in the range of
1465 			 * [0, nb_event_queue_flows - 1] which was
1466 			 * previously supplied to rte_event_dev_configure().
1467 			 */
1468 			uint32_t sub_event_type:8;
1469 			/**< Sub-event types based on the event source.
1470 			 * @see RTE_EVENT_TYPE_CPU
1471 			 */
1472 			uint32_t event_type:4;
1473 			/**< Event type to classify the event source.
1474 			 * @see RTE_EVENT_TYPE_ETHDEV, (RTE_EVENT_TYPE_*)
1475 			 */
1476 			uint8_t op:2;
1477 			/**< The type of event enqueue operation - new/forward/
1478 			 * etc. This field is not preserved across an instance
1479 			 * and is undefined on dequeue.
1480 			 * @see RTE_EVENT_OP_NEW, (RTE_EVENT_OP_*)
1481 			 */
1482 			uint8_t rsvd:4;
1483 			/**< Reserved for future use */
1484 			uint8_t sched_type:2;
1485 			/**< Scheduler synchronization type (RTE_SCHED_TYPE_*)
1486 			 * associated with flow id on a given event queue
1487 			 * for the enqueue and dequeue operation.
1488 			 */
1489 			uint8_t queue_id;
1490 			/**< Targeted event queue identifier for the enqueue or
1491 			 * dequeue operation.
1492 			 * The value must be in the range of
1493 			 * [0, nb_event_queues - 1] which was previously supplied to
1494 			 * rte_event_dev_configure().
1495 			 */
1496 			uint8_t priority;
1497 			/**< Event priority relative to other events in the
1498 			 * event queue. The requested priority should be in the
1499 			 * range of  [RTE_EVENT_DEV_PRIORITY_HIGHEST,
1500 			 * RTE_EVENT_DEV_PRIORITY_LOWEST].
1501 			 * The implementation shall normalize the requested
1502 			 * priority to supported priority value.
1503 			 * Valid when the device has
1504 			 * RTE_EVENT_DEV_CAP_EVENT_QOS capability.
1505 			 */
1506 			uint8_t impl_opaque;
1507 			/**< Implementation specific opaque value.
1508 			 * An implementation may use this field to hold
1509 			 * implementation specific value to share between
1510 			 * dequeue and enqueue operation.
1511 			 * The application should not modify this field.
1512 			 */
1513 		};
1514 	};
1515 	/** WORD1 */
1516 	union {
1517 		uint64_t u64;
1518 		/**< Opaque 64-bit value */
1519 		void *event_ptr;
1520 		/**< Opaque event pointer */
1521 		struct rte_mbuf *mbuf;
1522 		/**< mbuf pointer if dequeued event is associated with mbuf */
1523 		struct rte_event_vector *vec;
1524 		/**< Event vector pointer. */
1525 	};
1526 };
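
/*
 * A minimal sketch of populating the structure above to inject a new
 * CPU-generated event carrying an mbuf; flow, m, dev_id and port_id are
 * illustrative, the field names come from the structure definition.
 *
 * @code{.c}
 * struct rte_event ev = {
 * 	.op = RTE_EVENT_OP_NEW,
 * 	.queue_id = 0,
 * 	.event_type = RTE_EVENT_TYPE_CPU,
 * 	.sched_type = RTE_SCHED_TYPE_ATOMIC,
 * 	.flow_id = flow,
 * 	.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 * 	.mbuf = m,
 * };
 *
 * (void)rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 * @endcode
 */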
1527 
1528 /* Ethdev Rx adapter capability bitmap flags */
1529 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT	0x1
1530 /**< This flag is set when the packet transfer mechanism is in HW.
1531  * Ethdev can send packets to the event device using internal event port.
1532  */
1533 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ	0x2
1534 /**< Adapter supports multiple event queues per ethdev. Every ethdev
1535  * Rx queue can be connected to a unique event queue.
1536  */
1537 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
1538 /**< The application can override the adapter generated flow ID in the
1539  * event. This flow ID can be specified when adding an ethdev Rx queue
1540  * to the adapter using the ev.flow_id member.
1541  * @see struct rte_event_eth_rx_adapter_queue_conf::ev
1542  * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
1543  */
1544 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR	0x8
1545 /**< Adapter supports event vectorization per ethdev. */
1546 
1547 /**
1548  * Retrieve the event device's ethdev Rx adapter capabilities for the
1549  * specified ethernet port
1550  *
1551  * @param dev_id
1552  *   The identifier of the device.
1553  *
1554  * @param eth_port_id
1555  *   The identifier of the ethernet device.
1556  *
1557  * @param[out] caps
1558  *   A pointer to memory filled with Rx event adapter capabilities.
1559  *
1560  * @return
1561  *   - 0: Success, driver provides Rx event adapter capabilities for the
1562  *	ethernet device.
1563  *   - <0: Error code returned by the driver function.
1564  */
1565 int
1566 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1567 				uint32_t *caps);
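
/*
 * A minimal sketch of testing the capability flags above; dev_id and
 * eth_port_id are assumed to be valid identifiers.
 *
 * @code{.c}
 * uint32_t caps;
 *
 * if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps) == 0 &&
 *     (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
 * 	// HW moves packets into the event device via an internal port;
 * 	// no service core is required for this ethdev.
 * }
 * @endcode
 */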
1568 
1569 #define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
1570 /**< This flag is set when the timer mechanism is in HW. */
1571 
1572 #define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC      (1ULL << 1)
1573 /**< This flag is set if periodic mode is supported. */
1574 
1575 /**
1576  * Retrieve the event device's timer adapter capabilities.
1577  *
1578  * @param dev_id
1579  *   The identifier of the device.
1580  *
1581  * @param[out] caps
1582  *   A pointer to memory to be filled with event timer adapter capabilities.
1583  *
1584  * @return
1585  *   - 0: Success, driver provided event timer adapter capabilities.
1586  *   - <0: Error code returned by the driver function.
1587  */
1588 int
1589 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
1590 
1591 /* Crypto adapter capability bitmap flag */
1592 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW   0x1
1593 /**< Flag indicates HW is capable of generating events in
1594  * RTE_EVENT_OP_NEW enqueue operation. Cryptodev will send
1595  * packets to the event device as new events using an internal
1596  * event port.
1597  */
1598 
1599 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD   0x2
1600 /**< Flag indicates HW is capable of generating events in
1601  * RTE_EVENT_OP_FORWARD enqueue operation. Cryptodev will send
1602  * packets to the event device as forwarded event using an
1603  * internal event port.
1604  */
1605 
1606 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND  0x4
1607 /**< Flag indicates HW is capable of mapping crypto queue pair to
1608  * event queue.
1609  */
1610 
1611 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA   0x8
1612 /**< Flag indicates HW/SW supports a mechanism to store and retrieve
1613  * the private data information along with the crypto session.
1614  */
1615 
1616 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR   0x10
1617 /**< Flag indicates HW is capable of aggregating processed
1618  * crypto operations into rte_event_vector.
1619  */
1620 
1621 /**
1622  * Retrieve the event device's crypto adapter capabilities for the
1623  * specified cryptodev device
1624  *
1625  * @param dev_id
1626  *   The identifier of the device.
1627  *
1628  * @param cdev_id
1629  *   The identifier of the cryptodev device.
1630  *
1631  * @param[out] caps
1632  *   A pointer to memory filled with event adapter capabilities.
1633  *   It is expected to be pre-allocated & initialized by caller.
1634  *
1635  * @return
1636  *   - 0: Success, driver provides event adapter capabilities for the
1637  *     cryptodev device.
1638  *   - <0: Error code returned by the driver function.
1639  */
1640 int
1641 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
1642 				  uint32_t *caps);
1643 
1644 /* DMA adapter capability bitmap flag */
1645 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
1646 /**< Flag indicates HW is capable of generating events in
1647  * RTE_EVENT_OP_NEW enqueue operation. DMADEV will send
1648  * packets to the event device as new events using an
1649  * internal event port.
1650  */
1651 
1652 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
1653 /**< Flag indicates HW is capable of generating events in
1654  * RTE_EVENT_OP_FORWARD enqueue operation. DMADEV will send
1655  * packets to the event device as forwarded event using an
1656  * internal event port.
1657  */
1658 
1659 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4
1660 /**< Flag indicates HW is capable of mapping DMA vchan to event queue. */
1661 
1662 /**
1663  * Retrieve the event device's DMA adapter capabilities for the
1664  * specified dmadev device
1665  *
1666  * @param dev_id
1667  *   The identifier of the device.
1668  *
1669  * @param dmadev_id
1670  *   The identifier of the dmadev device.
1671  *
1672  * @param[out] caps
1673  *   A pointer to memory filled with event adapter capabilities.
1674  *   It is expected to be pre-allocated & initialized by caller.
1675  *
1676  * @return
1677  *   - 0: Success, driver provides event adapter capabilities for the
1678  *     dmadev device.
1679  *   - <0: Error code returned by the driver function.
1680  *
1681  */
1682 __rte_experimental
1683 int
1684 rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);
1685 
1686 /* Ethdev Tx adapter capability bitmap flags */
1687 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT	0x1
1688 /**< This flag is set when the PMD supports a packet transmit callback.
1689  */
1690 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR	0x2
1691 /**< Indicates that the Tx adapter is capable of handling event vector of
1692  * mbufs.
1693  */
1694 
1695 /**
1696  * Retrieve the event device's eth Tx adapter capabilities
1697  *
1698  * @param dev_id
1699  *   The identifier of the device.
1700  *
1701  * @param eth_port_id
1702  *   The identifier of the ethernet device.
1703  *
1704  * @param[out] caps
1705  *   A pointer to memory filled with eth Tx adapter capabilities.
1706  *
1707  * @return
1708  *   - 0: Success, driver provides eth Tx adapter capabilities.
1709  *   - <0: Error code returned by the driver function.
1710  */
1711 int
1712 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1713 				uint32_t *caps);
1714 
1715 /**
1716  * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
1717  *
1718  * If the device is configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT flag
1719  * then the application can use this function to convert a timeout value in
1720  * nanoseconds to an implementation-specific timeout value supplied in
1721  * rte_event_dequeue_burst().
1722  *
1723  * @param dev_id
1724  *   The identifier of the device.
1725  * @param ns
1726  *   Wait time in nanosecond
1727  * @param[out] timeout_ticks
1728  *   Value for the *timeout_ticks* parameter in rte_event_dequeue_burst()
1729  *
1730  * @return
1731  *  - 0 on success.
1732  *  - -ENOTSUP if the device doesn't support timeouts
1733  *  - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
1734  *  - other values < 0 on failure.
1735  *
1736  * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
1737  * @see rte_event_dev_configure()
1738  */
1739 int
1740 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1741 					uint64_t *timeout_ticks);
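
/*
 * A minimal sketch of converting a 100 microsecond dequeue timeout, assuming
 * the device was configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT.
 *
 * @code{.c}
 * struct rte_event ev[32];
 * uint64_t ticks;
 * uint16_t nb;
 *
 * if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) == 0)
 * 	nb = rte_event_dequeue_burst(dev_id, port_id, ev, RTE_DIM(ev), ticks);
 * @endcode
 */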
1742 
1743 /**
1744  * Link multiple source event queues supplied in *queues* to the destination
1745  * event port designated by its *port_id* with associated service priority
1746  * supplied in *priorities* on the event device designated by its *dev_id*.
1747  *
1748  * The link establishment shall enable the event port *port_id* to
1749  * receive events from the specified event queue(s) supplied in *queues*.
1750  *
1751  * An event queue may link to one or more event ports.
1752  * The number of links that can be established from an event queue to an event
1753  * port is implementation defined.
1754  *
1755  * Event queue(s) to event port link establishment can be changed at runtime
1756  * without re-configuring the device to support scaling and to reduce the
1757  * latency of critical work by establishing the link with more event ports
1758  * at runtime.
1759  *
1760  * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
1761  * than or equal to one, this function links the event queues to the default
1762  * profile_id i.e. profile_id 0 of the event port.
1763  *
1764  * @param dev_id
1765  *   The identifier of the device.
1766  *
1767  * @param port_id
1768  *   Event port identifier to select the destination port to link.
1769  *
1770  * @param queues
1771  *   Points to an array of *nb_links* event queues to be linked
1772  *   to the event port.
1773  *   NULL value is allowed, in which case this function links all the configured
1774  *   event queues *nb_event_queues* which were previously supplied to
1775  *   rte_event_dev_configure() to the event port *port_id*
1776  *
1777  * @param priorities
1778  *   Points to an array of *nb_links* service priorities associated with each
1779  *   event queue link to event port.
1780  *   The priority defines the event port's servicing priority for
1781  *   event queue, which may be ignored by an implementation.
1782  *   The requested priority should be in the range of
1783  *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
1784  *   The implementation shall normalize the requested priority to
1785  *   implementation supported priority value.
1786  *   NULL value is allowed, in which case this function links the event queues
1787  *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
1788  *
1789  * @param nb_links
1790  *   The number of links to establish. This parameter is ignored if queues is
1791  *   NULL.
1792  *
1793  * @return
1794  * the value of the *nb_links* parameter when the implementation has a
1795  * limitation on specific queue-to-port link establishment or if invalid
1796  * parameters are specified in *queues*.
1797  * If the return value is less than *nb_links*, the remaining links at the end
1798  * of link[] are not established, and the caller has to take care of them.
1799  * If the return value is less than *nb_links*, then the implementation shall
1800  * update rte_errno accordingly. Possible rte_errno values are:
1801  * (EDQUOT) Quota exceeded (the application tried to link a queue configured
1802  *  with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
1803  * (EINVAL) Invalid parameter
1804  * (EINVAL) Invalid parameter
1805  */
1806 int
1807 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
1808 		    const uint8_t queues[], const uint8_t priorities[],
1809 		    uint16_t nb_links);
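
/*
 * A minimal sketch of linking two queues to one port with distinct servicing
 * priorities; the queue and port identifiers are illustrative.
 *
 * @code{.c}
 * const uint8_t queues[] = { 0, 1 };
 * const uint8_t prios[] = {
 * 	RTE_EVENT_DEV_PRIORITY_HIGHEST,
 * 	RTE_EVENT_DEV_PRIORITY_NORMAL,
 * };
 *
 * if (rte_event_port_link(dev_id, port_id, queues, prios, 2) != 2) {
 * 	// Fewer links than requested were established; inspect rte_errno.
 * }
 * @endcode
 */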
1810 
1811 /**
1812  * Unlink multiple source event queues supplied in *queues* from the destination
1813  * event port designated by its *port_id* on the event device designated
1814  * by its *dev_id*.
1815  *
1816  * The unlink call issues an async request to disable the event port *port_id*
1817  * from receiving events from the specified event queue(s) in *queues*.
1818  * Event queue(s) to event port unlink establishment can be changed at runtime
1819  * without re-configuring the device.
1820  *
1821  * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
1822  * than or equal to one, this function unlinks the event queues from the default
1823  * profile identifier i.e. profile 0 of the event port.
1824  *
1825  * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
1826  *
1827  * @param dev_id
1828  *   The identifier of the device.
1829  *
1830  * @param port_id
1831  *   Event port identifier to select the destination port to unlink.
1832  *
1833  * @param queues
1834  *   Points to an array of *nb_unlinks* event queues to be unlinked
1835  *   from the event port.
1836  *   NULL value is allowed, in which case this function unlinks all the
1837  *   event queue(s) from the event port *port_id*.
1838  *
1839  * @param nb_unlinks
1840  *   The number of unlinks to perform. This parameter is ignored if queues is
1841  *   NULL.
1842  *
1843  * @return
1844  * The number of unlinks successfully requested. The return value can be less
1845  * than the value of the *nb_unlinks* parameter when the implementation has a
1846  * limitation on specific queue-to-port unlink establishment or
1847  * if invalid parameters are specified.
1848  * If the return value is less than *nb_unlinks*, the remaining queues at the
1849  * end of queues[] are not unlinked, and the caller has to take care of them.
1850  * If the return value is less than *nb_unlinks*, then the implementation shall
1851  * update rte_errno accordingly. Possible rte_errno values are:
1852  * (EINVAL) Invalid parameter
1853  */
1854 int
1855 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
1856 		      uint8_t queues[], uint16_t nb_unlinks);
1857 
1858 /**
1859  * Link multiple source event queues supplied in *queues* to the destination
1860  * event port designated by its *port_id* with associated profile identifier
1861  * supplied in *profile_id* with service priorities supplied in *priorities*
1862  * on the event device designated by its *dev_id*.
1863  *
1864  * If *profile_id* is set to 0, then the links created by the call
1865  * ``rte_event_port_link()`` will be overwritten.
1866  *
1867  * Event ports by default use profile_id 0 unless it is changed using the
1868  * call ``rte_event_port_profile_switch()``.
1869  *
1870  * The link establishment shall enable the event port *port_id* to
1871  * receive events from the specified event queue(s) supplied in *queues*.
1872  *
1873  * An event queue may link to one or more event ports.
1874  * The number of links that can be established from an event queue to an event
1875  * port is implementation defined.
1876  *
1877  * Event queue(s) to event port link establishment can be changed at runtime
1878  * without re-configuring the device to support scaling and to reduce the
1879  * latency of critical work by establishing the link with more event ports
1880  * at runtime.
1881  *
1882  * @param dev_id
1883  *   The identifier of the device.
1884  *
1885  * @param port_id
1886  *   Event port identifier to select the destination port to link.
1887  *
1888  * @param queues
1889  *   Points to an array of *nb_links* event queues to be linked
1890  *   to the event port.
1891  *   NULL value is allowed, in which case this function links all the configured
1892  *   event queues *nb_event_queues* which were previously supplied to
1893  *   rte_event_dev_configure() to the event port *port_id*
1894  *
1895  * @param priorities
1896  *   Points to an array of *nb_links* service priorities associated with each
1897  *   event queue link to event port.
1898  *   The priority defines the event port's servicing priority for
1899  *   event queue, which may be ignored by an implementation.
1900  *   The requested priority should be in the range of
1901  *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
1902  *   The implementation shall normalize the requested priority to
1903  *   implementation supported priority value.
1904  *   NULL value is allowed, in which case this function links the event queues
1905  *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
1906  *
1907  * @param nb_links
1908  *   The number of links to establish. This parameter is ignored if queues is
1909  *   NULL.
1910  *
1911  * @param profile_id
1912  *   The profile identifier associated with the links between event queues and
1913  *   event port. Should be less than the max capability reported by
1914  *   ``rte_event_dev_info::max_profiles_per_port``
1915  *
1916  * @return
1917  * The number of links actually established. The return value can be less than
1918  * the value of the *nb_links* parameter when the implementation has a
1919  * limitation on specific queue-to-port link establishment or if invalid
1920  * parameters are specified in *queues*.
1921  * If the return value is less than *nb_links*, the remaining links at the end
1922  * of link[] are not established, and the caller has to take care of them.
1923  * If the return value is less than *nb_links*, then the implementation shall
1924  * update rte_errno accordingly. Possible rte_errno values are:
1925  * (EDQUOT) Quota exceeded (the application tried to link a queue configured
1926  *  with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
1927  * (EINVAL) Invalid parameter
1928  *
1929  */
1930 __rte_experimental
1931 int
1932 rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
1933 				 const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);
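
/*
 * A minimal sketch of populating two link profiles and switching between
 * them; it assumes rte_event_dev_info::max_profiles_per_port >= 2.
 *
 * @code{.c}
 * uint8_t q0 = 0, q1 = 1;
 *
 * // Profile 0 serves queue 0, profile 1 serves queue 1.
 * rte_event_port_profile_links_set(dev_id, port_id, &q0, NULL, 1, 0);
 * rte_event_port_profile_links_set(dev_id, port_id, &q1, NULL, 1, 1);
 *
 * // Later, from the thread owning the port:
 * rte_event_port_profile_switch(dev_id, port_id, 1);
 * @endcode
 */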
1934 
1935 /**
1936  * Unlink multiple source event queues supplied in *queues* that belong to profile
1937  * designated by *profile_id* from the destination event port designated by its
1938  * *port_id* on the event device designated by its *dev_id*.
1939  *
1940  * If *profile_id* is set to 0, i.e. the default profile, then this function
1941  * will act as ``rte_event_port_unlink()``.
1942  *
1943  * The unlink call issues an async request to disable the event port *port_id*
1944  * from receiving events from the specified event queue(s) in *queues*.
1945  * Event queue(s) to event port unlink establishment can be changed at runtime
1946  * without re-configuring the device.
1947  *
1948  * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
1949  *
1950  * @param dev_id
1951  *   The identifier of the device.
1952  *
1953  * @param port_id
1954  *   Event port identifier to select the destination port to unlink.
1955  *
1956  * @param queues
1957  *   Points to an array of *nb_unlinks* event queues to be unlinked
1958  *   from the event port.
1959  *   NULL value is allowed, in which case this function unlinks all the
1960  *   event queue(s) from the event port *port_id*.
1961  *
1962  * @param nb_unlinks
1963  *   The number of unlinks to perform. This parameter is ignored if queues is
1964  *   NULL.
1965  *
1966  * @param profile_id
1967  *   The profile identifier associated with the links between event queues and
1968  *   event port. Should be less than the max capability reported by
1969  *   ``rte_event_dev_info::max_profiles_per_port``
1970  *
1971  * @return
1972  * The number of unlinks successfully requested. The return value can be less
1973  * than the value of the *nb_unlinks* parameter when the implementation has a
1974  * limitation on specific queue-to-port unlink establishment or
1975  * if invalid parameters are specified.
1976  * If the return value is less than *nb_unlinks*, the remaining queues at the
1977  * end of queues[] are not unlinked, and the caller has to take care of them.
1978  * If the return value is less than *nb_unlinks*, then the implementation shall
1979  * update rte_errno accordingly. Possible rte_errno values are:
1980  * (EINVAL) Invalid parameter
1981  *
1982  */
1983 __rte_experimental
1984 int
1985 rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
1986 			      uint16_t nb_unlinks, uint8_t profile_id);
1987 
1988 /**
1989  * Returns the number of unlinks in progress.
1990  *
1991  * This function provides the application with a method to detect when an
1992  * unlink has been completed by the implementation.
1993  *
1994  * @see rte_event_port_unlink() to issue unlink requests.
1995  *
1996  * @param dev_id
1997  *   The identifier of the device.
1998  *
1999  * @param port_id
2000  *   Event port identifier to select port to check for unlinks in progress.
2001  *
2002  * @return
2003  * The number of unlinks that are in progress. A return of zero indicates that
2004  * there are no outstanding unlink requests. A positive return value indicates
2005  * the number of unlinks that are in progress, but are not yet complete.
2006  * A negative return value indicates an error, -EINVAL indicates an invalid
2007  * parameter passed for *dev_id* or *port_id*.
2008  */
2009 int
2010 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);
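
/*
 * A minimal sketch of quiescing a port: request that all queues be unlinked,
 * then poll until no unlinks remain outstanding.
 *
 * @code{.c}
 * rte_event_port_unlink(dev_id, port_id, NULL, 0);
 * while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 * 	rte_pause();
 * @endcode
 */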
2011 
2012 /**
2013  * Retrieve the list of source event queues and their associated service priorities
2014  * linked to the destination event port designated by its *port_id*
2015  * on the event device designated by its *dev_id*.
2016  *
2017  * @param dev_id
2018  *   The identifier of the device.
2019  *
2020  * @param port_id
2021  *   Event port identifier.
2022  *
2023  * @param[out] queues
2024  *   Points to an array of *queues* for output.
2025  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
2026  *   store the event queue(s) linked with event port *port_id*
2027  *
2028  * @param[out] priorities
2029  *   Points to an array of *priorities* for output.
2030  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
2031  *   store the service priority associated with each event queue linked
2032  *
2033  * @return
2034  * The number of links established on the event port designated by its
2035  *  *port_id*.
2036  * - <0 on failure.
2037  */
2038 int
2039 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
2040 			 uint8_t queues[], uint8_t priorities[]);
2041 
2042 /**
2043  * Retrieve the list of source event queues and their service priorities
2044  * associated with a *profile_id* and linked to the destination event port
2045  * designated by its *port_id* on the event device designated by its *dev_id*.
2046  *
2047  * @param dev_id
2048  *   The identifier of the device.
2049  *
2050  * @param port_id
2051  *   Event port identifier.
2052  *
2053  * @param[out] queues
2054  *   Points to an array of *queues* for output.
2055  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
2056  *   store the event queue(s) linked with event port *port_id*
2057  *
2058  * @param[out] priorities
2059  *   Points to an array of *priorities* for output.
2060  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
2061  *   store the service priority associated with each event queue linked
2062  *
2063  * @param profile_id
2064  *   The profile identifier associated with the links between event queues and
2065  *   event port. Should be less than the max capability reported by
2066  *   ``rte_event_dev_info::max_profiles_per_port``
2067  *
2068  * @return
2069  * The number of links established on the event port designated by its
2070  *  *port_id*.
2071  * - <0 on failure.
2072  */
2073 __rte_experimental
2074 int
2075 rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
2076 				 uint8_t priorities[], uint8_t profile_id);
2077 
2078 /**
2079  * Retrieve the service ID of the event dev. If the event dev doesn't use
2080  * an rte_service function, this function returns -ESRCH.
2081  *
2082  * @param dev_id
2083  *   The identifier of the device.
2084  *
2085  * @param [out] service_id
2086  *   A pointer to a uint32_t, to be filled in with the service id.
2087  *
2088  * @return
2089  *   - 0: Success
2090  *   - <0: Error code on failure, if the event dev doesn't use a rte_service
2091  *   function, this function returns -ESRCH.
2092  */
2093 int
2094 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
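
/*
 * A minimal sketch of running a software eventdev's scheduling service on a
 * dedicated service lcore; SERVICE_LCORE is an illustrative core id and the
 * service calls come from rte_service.h.
 *
 * @code{.c}
 * uint32_t service_id;
 *
 * if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 * 	rte_service_lcore_add(SERVICE_LCORE);
 * 	rte_service_map_lcore_set(service_id, SERVICE_LCORE, 1);
 * 	rte_service_runstate_set(service_id, 1);
 * 	rte_service_lcore_start(SERVICE_LCORE);
 * }
 * @endcode
 */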
2095 
2096 /**
2097  * Dump internal information about *dev_id* to the FILE* provided in *f*.
2098  *
2099  * @param dev_id
2100  *   The identifier of the device.
2101  *
2102  * @param f
2103  *   A pointer to a file for output
2104  *
2105  * @return
2106  *   - 0: on success
2107  *   - <0: on failure.
2108  */
2109 int
2110 rte_event_dev_dump(uint8_t dev_id, FILE *f);
2111 
2112 /** Maximum name length for extended statistics counters */
2113 #define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
2114 
2115 /**
2116  * Selects the component of the eventdev to retrieve statistics from.
2117  */
2118 enum rte_event_dev_xstats_mode {
2119 	RTE_EVENT_DEV_XSTATS_DEVICE,
2120 	RTE_EVENT_DEV_XSTATS_PORT,
2121 	RTE_EVENT_DEV_XSTATS_QUEUE,
2122 };
2123 
2124 /**
2125  * A name-key lookup element for extended statistics.
2126  *
2127  * This structure is used to map between names and ID numbers
2128  * for extended eventdev statistics.
2129  */
2130 struct rte_event_dev_xstats_name {
2131 	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
2132 };
2133 
2134 /**
2135  * Retrieve names of extended statistics of an event device.
2136  *
2137  * @param dev_id
2138  *   The identifier of the event device.
2139  * @param mode
2140  *   The mode of statistics to retrieve. Choices include the device statistics,
2141  *   port statistics or queue statistics.
2142  * @param queue_port_id
2143  *   Used to specify the port or queue number in queue or port mode, and is
2144  *   ignored in device mode.
2145  * @param[out] xstats_names
2146  *   Block of memory to insert names into. Must be at least size in capacity.
2147  *   If set to NULL, function returns required capacity.
2148  * @param[out] ids
2149  *   Block of memory to insert ids into. Must be at least size in capacity.
2150  *   If set to NULL, function returns required capacity. The id values returned
2151  *   can be passed to *rte_event_dev_xstats_get* to select statistics.
2152  * @param size
2153  *   Capacity of xstats_names (number of names).
2154  * @return
2155  *   - positive value lower or equal to size: success. The return value
2156  *     is the number of entries filled in the stats table.
2157  *   - positive value higher than size: error, the given statistics table
2158  *     is too small. The return value corresponds to the size that should
2159  *     be given to succeed. The entries in the table are not valid and
2160  *     shall not be used by the caller.
2161  *   - negative value on error:
2162  *        -ENODEV for invalid *dev_id*
2163  *        -EINVAL for invalid mode, queue port or id parameters
2164  *        -ENOTSUP if the device doesn't support this function.
2165  */
2166 int
2167 rte_event_dev_xstats_names_get(uint8_t dev_id,
2168 			       enum rte_event_dev_xstats_mode mode,
2169 			       uint8_t queue_port_id,
2170 			       struct rte_event_dev_xstats_name *xstats_names,
2171 			       uint64_t *ids,
2172 			       unsigned int size);
2173 
2174 /**
2175  * Retrieve extended statistics of an event device.
2176  *
2177  * @param dev_id
2178  *   The identifier of the device.
2179  * @param mode
2180  *  The mode of statistics to retrieve. Choices include the device statistics,
2181  *  port statistics or queue statistics.
2182  * @param queue_port_id
2183  *   Used to specify the port or queue number in queue or port mode, and is
2184  *   ignored in device mode.
2185  * @param ids
2186  *   The id numbers of the stats to get. The ids can be got from the stat
2187  *   position in the stat list from rte_event_dev_get_xstats_names(), or
2188  *   by using rte_event_dev_xstats_by_name_get().
2189  * @param[out] values
2190  *   The values for each stats request by ID.
2191  * @param n
2192  *   The number of stats requested
2193  * @return
2194  *   - positive value: number of stat entries filled into the values array
2195  *   - negative value on error:
2196  *        -ENODEV for invalid *dev_id*
2197  *        -EINVAL for invalid mode, queue port or id parameters
2198  *        -ENOTSUP if the device doesn't support this function.
2199  */
2200 int
2201 rte_event_dev_xstats_get(uint8_t dev_id,
2202 			 enum rte_event_dev_xstats_mode mode,
2203 			 uint8_t queue_port_id,
2204 			 const uint64_t ids[],
2205 			 uint64_t values[], unsigned int n);
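
/*
 * A minimal sketch of the usual two-call pattern for the xstats APIs above:
 * query the required size with NULL buffers, allocate, then fetch names,
 * ids and values (malloc()/free() from <stdlib.h>).
 *
 * @code{.c}
 * int n = rte_event_dev_xstats_names_get(dev_id,
 * 		RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *
 * if (n > 0) {
 * 	struct rte_event_dev_xstats_name *names = malloc(n * sizeof(*names));
 * 	uint64_t *ids = malloc(n * sizeof(*ids));
 * 	uint64_t *vals = malloc(n * sizeof(*vals));
 *
 * 	rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
 * 			0, names, ids, n);
 * 	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 * 			ids, vals, n);
 * 	// ... report names[i].name against vals[i], then free the arrays ...
 * }
 * @endcode
 */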
2206 
2207 /**
2208  * Retrieve the value of a single stat by requesting it by name.
2209  *
2210  * @param dev_id
2211  *   The identifier of the device
2212  * @param name
2213  *   The stat name to retrieve
2214  * @param[out] id
2215  *   If non-NULL, the numerical id of the stat will be returned, so that further
2216  *   requests for the stat can be got using rte_event_dev_xstats_get, which will
2217  *   be faster as it doesn't need to scan a list of names for the stat.
2218  *   If the stat cannot be found, the id returned will be (unsigned)-1.
2219  * @return
2220  *   - positive value or zero: the stat value
2221  *   - negative value: -EINVAL if stat not found, -ENOTSUP if not supported.
2222  */
2223 uint64_t
2224 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
2225 				 uint64_t *id);
2226 
2227 /**
2228  * Reset the values of the xstats of the selected component in the device.
2229  *
2230  * @param dev_id
2231  *   The identifier of the device
2232  * @param mode
2233  *   The mode of the statistics to reset. Choose from device, queue or port.
2234  * @param queue_port_id
2235  *   The queue or port to reset. 0 and positive values select ports and queues,
2236  *   while -1 indicates all ports or queues.
2237  * @param ids
2238  *   Selects specific statistics to be reset. When NULL, all statistics selected
2239  *   by *mode* will be reset. If non-NULL, must point to array of at least
2240  *   *nb_ids* size.
2241  * @param nb_ids
2242  *   The number of ids available from the *ids* array. Ignored when ids is NULL.
2243  * @return
2244  *   - zero: successfully reset the statistics to zero
2245  *   - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported.
2246  */
2247 int
2248 rte_event_dev_xstats_reset(uint8_t dev_id,
2249 			   enum rte_event_dev_xstats_mode mode,
2250 			   int16_t queue_port_id,
2251 			   const uint64_t ids[],
2252 			   uint32_t nb_ids);
2253 
2254 /**
2255  * Trigger the eventdev self test.
2256  *
2257  * @param dev_id
2258  *   The identifier of the device
2259  * @return
2260  *   - 0: Selftest successful
2261  *   - -ENOTSUP if the device doesn't support selftest
2262  *   - other values < 0 on failure.
2263  */
2264 int rte_event_dev_selftest(uint8_t dev_id);
2265 
2266 /**
2267  * Get the memory required per event vector based on the number of elements per
2268  * vector.
2269  * This should be used to create the mempool that holds the event vectors.
2270  *
2271  * @param name
2272  *   The name of the vector pool.
2273  * @param n
2274  *   The number of elements (event vectors) in the pool.
2275  * @param cache_size
2276  *   Size of the per-core object cache. See rte_mempool_create() for
2277  *   details.
2278  * @param nb_elem
2279  *   The number of elements that a single event vector should be able to hold.
2280  * @param socket_id
2281  *   The socket identifier where the memory should be allocated. The
2282  *   value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
2283  *   reserved zone
2284  *
2285  * @return
2286  *   The pointer to the newly allocated mempool, on success. NULL on error
2287  *   with rte_errno set appropriately. Possible rte_errno values include:
2288  *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
2289  *    - E_RTE_SECONDARY - function was called from a secondary process instance
2290  *    - EINVAL - cache size provided is too large, or priv_size is not aligned.
2291  *    - ENOSPC - the maximum number of memzones has already been allocated
2292  *    - EEXIST - a memzone with the same name already exists
2293  *    - ENOMEM - no appropriate memory area found in which to create memzone
2294  *    - ENAMETOOLONG - mempool name requested is too long.
2295  */
2296 struct rte_mempool *
2297 rte_event_vector_pool_create(const char *name, unsigned int n,
2298 			     unsigned int cache_size, uint16_t nb_elem,
2299 			     int socket_id);
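
/*
 * A minimal sketch of creating a pool of event vectors that can each hold
 * up to 32 mbuf pointers; the pool name and sizes are illustrative.
 *
 * @code{.c}
 * struct rte_mempool *vec_pool;
 *
 * vec_pool = rte_event_vector_pool_create("ev_vec_pool", 16 * 1024,
 * 					   64,   // per-lcore cache size
 * 					   32,   // elements per vector
 * 					   rte_socket_id());
 * if (vec_pool == NULL)
 * 	rte_panic("vector pool creation failed: %d\n", rte_errno);
 * @endcode
 */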
2300 
2301 #include <rte_eventdev_core.h>
2302 
2303 static __rte_always_inline uint16_t
2304 __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2305 			  const struct rte_event ev[], uint16_t nb_events,
2306 			  const event_enqueue_burst_t fn)
2307 {
2308 	const struct rte_event_fp_ops *fp_ops;
2309 	void *port;
2310 
2311 	fp_ops = &rte_event_fp_ops[dev_id];
2312 	port = fp_ops->data[port_id];
2313 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2314 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2315 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2316 		rte_errno = EINVAL;
2317 		return 0;
2318 	}
2319 
2320 	if (port == NULL) {
2321 		rte_errno = EINVAL;
2322 		return 0;
2323 	}
2324 #endif
2325 	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
2326 	/*
2327 	 * Allow zero-cost invocation of the non-burst mode routine if the
2328 	 * application requests nb_events as a constant of one.
2329 	 */
2330 	if (nb_events == 1)
2331 		return (fp_ops->enqueue)(port, ev);
2332 	else
2333 		return fn(port, ev, nb_events);
2334 }
2335 
2336 /**
2337  * Enqueue a burst of event objects or an event object supplied in *rte_event*
2338  * structure on an event device designated by its *dev_id* through the event
2339  * port specified by *port_id*. Each event object specifies the event queue on
2340  * which it will be enqueued.
2341  *
2342  * The *nb_events* parameter is the number of event objects to enqueue which are
2343  * supplied in the *ev* array of *rte_event* structure.
2344  *
2345  * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
2346  * enqueued to the same port that their associated events were dequeued from.
2347  *
2348  * The rte_event_enqueue_burst() function returns the number of
2349  * event objects it actually enqueued. A return value equal to *nb_events*
2350  * means that all event objects have been enqueued.
2351  *
2352  * @param dev_id
2353  *   The identifier of the device.
2354  * @param port_id
2355  *   The identifier of the event port.
2356  * @param ev
2357  *   Points to an array of *nb_events* objects of type *rte_event* structure
2358  *   which contain the event object enqueue operations to be processed.
2359  * @param nb_events
2360  *   The number of event objects to enqueue, typically number of
2361  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2362  *   available for this port.
2363  *
2364  * @return
2365  *   The number of event objects actually enqueued on the event device. The
2366  *   return value can be less than the value of the *nb_events* parameter when
2367  * the event device's queue is full or if invalid parameters are specified in a
2368  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2369  *   events at the end of ev[] are not consumed and the caller has to take care
2370  *   of them, and rte_errno is set accordingly. Possible errno values include:
2371  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2372  *              ID is invalid, or an event's sched type doesn't match the
2373  *              capabilities of the destination queue.
2374  *   - ENOSPC   The event port was backpressured and unable to enqueue
2375  *              one or more events. This error code is only applicable to
2376  *              closed systems.
2377  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2378  */
2379 static inline uint16_t
2380 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2381 			const struct rte_event ev[], uint16_t nb_events)
2382 {
2383 	const struct rte_event_fp_ops *fp_ops;
2384 
2385 	fp_ops = &rte_event_fp_ops[dev_id];
2386 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2387 					 fp_ops->enqueue_burst);
2388 }
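
/*
 * A minimal sketch of handling a partial enqueue as described above: retry
 * the unconsumed tail of ev[] while the port is merely backpressured
 * (rte_errno == ENOSPC, closed systems only).
 *
 * @code{.c}
 * uint16_t sent = 0;
 *
 * do {
 * 	sent += rte_event_enqueue_burst(dev_id, port_id, &ev[sent],
 * 					nb_events - sent);
 * } while (sent < nb_events && rte_errno == ENOSPC);
 * @endcode
 */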
2389 
2390 /**
2391  * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_NEW* on
2392  * an event device designated by its *dev_id* through the event port specified
2393  * by *port_id*.
2394  *
2395  * Provides the same functionality as rte_event_enqueue_burst(), except that
2396  * the application can use this API when all the objects in the burst contain
2397  * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
2398  * function can provide an additional hint to the PMD and optimize if possible.
2399  *
2400  * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
2401  * has an event object of operation type != RTE_EVENT_OP_NEW.
2402  *
2403  * @param dev_id
2404  *   The identifier of the device.
2405  * @param port_id
2406  *   The identifier of the event port.
2407  * @param ev
2408  *   Points to an array of *nb_events* objects of type *rte_event* structure
2409  *   which contain the event object enqueue operations to be processed.
2410  * @param nb_events
2411  *   The number of event objects to enqueue, typically number of
2412  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2413  *   available for this port.
2414  *
2415  * @return
2416  *   The number of event objects actually enqueued on the event device. The
2417  *   return value can be less than the value of the *nb_events* parameter when
2418  * the event device's queue is full or if invalid parameters are specified in a
2419  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2420  *   events at the end of ev[] are not consumed and the caller has to take care
2421  *   of them, and rte_errno is set accordingly. Possible errno values include:
2422  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2423  *              ID is invalid, or an event's sched type doesn't match the
2424  *              capabilities of the destination queue.
2425  *   - ENOSPC   The event port was backpressured and unable to enqueue
2426  *              one or more events. This error code is only applicable to
2427  *              closed systems.
2428  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2429  * @see rte_event_enqueue_burst()
2430  */
2431 static inline uint16_t
2432 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
2433 			    const struct rte_event ev[], uint16_t nb_events)
2434 {
2435 	const struct rte_event_fp_ops *fp_ops;
2436 
2437 	fp_ops = &rte_event_fp_ops[dev_id];
2438 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2439 					 fp_ops->enqueue_new_burst);
2440 }
2441 
2442 /**
2443  * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_FORWARD*
2444  * on an event device designated by its *dev_id* through the event port
2445  * specified by *port_id*.
2446  *
2447  * Provides the same functionality as rte_event_enqueue_burst(), except that
2448  * the application can use this API when all the objects in the burst contain
2449  * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
2450  * function can provide an additional hint to the PMD and optimize if possible.
2451  *
2452  * The rte_event_enqueue_forward_burst() result is undefined if the enqueue
2453  * burst has an event object of operation type != RTE_EVENT_OP_FORWARD.
2454  *
2455  * @param dev_id
2456  *   The identifier of the device.
2457  * @param port_id
2458  *   The identifier of the event port.
2459  * @param ev
2460  *   Points to an array of *nb_events* objects of type *rte_event* structure
2461  *   which contain the event object enqueue operations to be processed.
2462  * @param nb_events
2463  *   The number of event objects to enqueue, typically number of
2464  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2465  *   available for this port.
2466  *
2467  * @return
2468  *   The number of event objects actually enqueued on the event device. The
2469  *   return value can be less than the value of the *nb_events* parameter when
2470  * the event device's queue is full or if invalid parameters are specified in a
2471  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2472  *   events at the end of ev[] are not consumed and the caller has to take care
2473  *   of them, and rte_errno is set accordingly. Possible errno values include:
2474  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2475  *              ID is invalid, or an event's sched type doesn't match the
2476  *              capabilities of the destination queue.
2477  *   - ENOSPC   The event port was backpressured and unable to enqueue
2478  *              one or more events. This error code is only applicable to
2479  *              closed systems.
2480  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2481  * @see rte_event_enqueue_burst()
2482  */
2483 static inline uint16_t
2484 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
2485 				const struct rte_event ev[], uint16_t nb_events)
2486 {
2487 	const struct rte_event_fp_ops *fp_ops;
2488 
2489 	fp_ops = &rte_event_fp_ops[dev_id];
2490 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2491 					 fp_ops->enqueue_forward_burst);
2492 }
2493 
2494 /**
2495  * Dequeue a burst of event objects or an event object from the event port
2496  * designated by its *event_port_id*, on an event device designated
2497  * by its *dev_id*.
2498  *
2499  * rte_event_dequeue_burst() does not dictate the specifics of the scheduling
2500  * algorithm, as each eventdev driver may have different criteria to schedule
2501  * an event. However, in general, from an application perspective the scheduler
2502  * may use the following scheme to dispatch an event to the port.
2503  *
2504  * 1) Selection of event queue based on
2505  *   a) The list of event queues linked to the event port.
2506  *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
2507  *   queue selection from list is based on event queue priority relative to
2508  *   other event queue supplied as *priority* in rte_event_queue_setup()
2509  *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
2510  *   queue selection from the list is based on event priority supplied as
2511  *   *priority* in rte_event_enqueue_burst()
2512  * 2) Selection of event
2513  *   a) The number of flows available in selected event queue.
2514  *   b) Schedule type method associated with the event
2515  *
2516  * The *nb_events* parameter is the maximum number of event objects to dequeue
2517  * which are returned in the *ev* array of *rte_event* structure.
2518  *
2519  * The rte_event_dequeue_burst() function returns the number of event objects
2520  * it actually dequeued. A return value equal to *nb_events* means that all
2521  * event objects have been dequeued.
2522  *
2523  * The number of events dequeued is the number of scheduler contexts held by
2524  * this port. These contexts are automatically released in the next
2525  * rte_event_dequeue_burst() invocation if the port supports implicit
2526  * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
2527  * operation can be used to release the contexts early.
2528  *
2529  * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
2530  * enqueued to the same port that their associated events were dequeued from.
2531  *
2532  * @param dev_id
2533  *   The identifier of the device.
2534  * @param port_id
2535  *   The identifier of the event port.
2536  * @param[out] ev
2537  *   Points to an array of *nb_events* objects of type *rte_event* structure
2538  *   for output to be populated with the dequeued event objects.
2539  * @param nb_events
2540  *   The maximum number of event objects to dequeue, typically number of
2541  *   rte_event_port_dequeue_depth() available for this port.
2542  *
2543  * @param timeout_ticks
2544  *   - 0 no-wait, returns immediately if there is no event.
2545  *   - >0 wait for the event, if the device is configured with
2546  *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
2547  *   at least one event is available or *timeout_ticks* time has elapsed.
2548  *   If the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
2549  *   then this function will wait until an event is available or
2550  *   *dequeue_timeout_ns* ns which was previously supplied to
2551  *   rte_event_dev_configure()
2552  *
2553  * @return
2554  * The number of event objects actually dequeued from the port. The return
2555  * value can be less than the value of the *nb_events* parameter when the
2556  * event port's queue is not full.
2557  *
2558  * @see rte_event_port_dequeue_depth()
2559  */
2560 static inline uint16_t
2561 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
2562 			uint16_t nb_events, uint64_t timeout_ticks)
2563 {
2564 	const struct rte_event_fp_ops *fp_ops;
2565 	void *port;
2566 
2567 	fp_ops = &rte_event_fp_ops[dev_id];
2568 	port = fp_ops->data[port_id];
2569 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2570 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2571 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2572 		rte_errno = EINVAL;
2573 		return 0;
2574 	}
2575 
2576 	if (port == NULL) {
2577 		rte_errno = EINVAL;
2578 		return 0;
2579 	}
2580 #endif
2581 	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
2582 	/*
2583 	 * Allow zero-cost invocation of the non-burst mode routine if the
2584 	 * application requests nb_events as a constant of one.
2585 	 */
2586 	if (nb_events == 1)
2587 		return (fp_ops->dequeue)(port, ev, timeout_ticks);
2588 	else
2589 		return (fp_ops->dequeue_burst)(port, ev, nb_events,
2590 					       timeout_ticks);
2591 }
2592 
2593 #define RTE_EVENT_DEV_MAINT_OP_FLUSH          (1 << 0)
2594 /**< Force an immediate flush of any buffered events in the port,
2595  * potentially at the cost of additional overhead.
2596  *
2597  * @see rte_event_maintain()
2598  */
2599 
2600 /**
2601  * Maintain an event device.
2602  *
2603  * This function is only relevant for event devices which do not have
2604  * the @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE flag set. Such devices
2605  * require an application thread using a particular port to
2606  * periodically call rte_event_maintain() on that port during periods
2607  * in which it is neither attempting to enqueue events to nor dequeue
2608  * events from the port. rte_event_maintain() is a low-overhead
2609  * function and should be called at a high rate (e.g., in the
2610  * application's poll loop).
2611  *
2612  * No port may be left unmaintained.
2613  *
2614  * At the application thread's convenience, rte_event_maintain() may
2615  * (but is not required to) be called even during periods when enqueue
2616  * or dequeue functions are being called, at the cost of a slight
2617  * increase in overhead.
2618  *
2619  * rte_event_maintain() may be called on event devices which have set
2620  * @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, in which case it is a
2621  * no-operation.
2622  *
2623  * @param dev_id
2624  *   The identifier of the device.
2625  * @param port_id
2626  *   The identifier of the event port.
2627  * @param op
2628  *   0, or @ref RTE_EVENT_DEV_MAINT_OP_FLUSH.
2629  * @return
2630  *  - 0 on success.
2631  *  - -EINVAL if *dev_id*, *port_id*, or *op* is invalid.
2632  *
2633  * @see RTE_EVENT_DEV_CAP_MAINTENANCE_FREE
2634  */
2635 static inline int
2636 rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
2637 {
2638 	const struct rte_event_fp_ops *fp_ops;
2639 	void *port;
2640 
2641 	fp_ops = &rte_event_fp_ops[dev_id];
2642 	port = fp_ops->data[port_id];
2643 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2644 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2645 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
2646 		return -EINVAL;
2647 
2648 	if (port == NULL)
2649 		return -EINVAL;
2650 
2651 	if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
2652 		return -EINVAL;
2653 #endif
2654 	rte_eventdev_trace_maintain(dev_id, port_id, op);
2655 
2656 	if (fp_ops->maintain != NULL)
2657 		fp_ops->maintain(port, op);
2658 
2659 	return 0;
2660 }
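
/*
 * A minimal sketch of the maintenance contract described above on a device
 * without RTE_EVENT_DEV_CAP_MAINTENANCE_FREE; done and have_port_work() are
 * illustrative.
 *
 * @code{.c}
 * while (!done) {
 * 	if (have_port_work()) {
 * 		// ... enqueue to / dequeue from the port as usual ...
 * 	} else {
 * 		// Not touching the port this iteration: keep it maintained.
 * 		rte_event_maintain(dev_id, port_id, 0);
 * 	}
 * }
 * @endcode
 */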
2661 
2662 /**
2663  * Change the active profile on an event port.
2664  *
2665  * This function is used to change the current active profile on an event port
2666  * when multiple link profiles are configured on an event port through the
2667  * function call ``rte_event_port_profile_links_set``.
2668  *
2669  * On the subsequent ``rte_event_dequeue_burst`` call, only the event queues
2670  * that were associated with the newly active profile will participate in
2671  * scheduling.
2672  *
2673  * @param dev_id
2674  *   The identifier of the device.
2675  * @param port_id
2676  *   The identifier of the event port.
2677  * @param profile_id
2678  *   The identifier of the profile.
2679  * @return
2680  *  - 0 on success.
2681  *  - -EINVAL if *dev_id*, *port_id*, or *profile_id* is invalid.
2682  */
2683 static inline uint8_t
2684 rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
2685 {
2686 	const struct rte_event_fp_ops *fp_ops;
2687 	void *port;
2688 
2689 	fp_ops = &rte_event_fp_ops[dev_id];
2690 	port = fp_ops->data[port_id];
2691 
2692 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2693 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2694 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
2695 		return -EINVAL;
2696 
2697 	if (port == NULL)
2698 		return -EINVAL;
2699 
2700 	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
2701 		return -EINVAL;
2702 #endif
2703 	rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);
2704 
2705 	return fp_ops->profile_switch(port, profile_id);
2706 }
2707 
2708 #ifdef __cplusplus
2709 }
2710 #endif
2711 
2712 #endif /* _RTE_EVENTDEV_H_ */
2713