/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc.
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright 2016 NXP
 * All rights reserved.
 */

#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_

/**
 * @file
 *
 * RTE Event Device API
 * ====================
 *
 * In a traditional DPDK application model, the application polls Ethdev port RX
 * queues to look for work, and processing is done in a run-to-completion manner,
 * after which the packets are transmitted on an Ethdev TX queue. Load is
 * distributed by statically assigning ports and queues to lcores, and NIC
 * receive-side scaling (RSS), or similar, is employed to distribute network flows
 * (and thus work) on the same port across multiple RX queues.
 *
 * In contrast, in an event-driven model, as supported by this "eventdev" library,
 * incoming packets (or other input events) are fed into an event device, which
 * schedules those packets across the available lcores, in accordance with its configuration.
 * This event-driven programming model offers applications automatic multicore scaling,
 * dynamic load balancing, pipelining, packet order maintenance, synchronization,
 * and prioritization/quality of service.
 *
 * The Event Device API is composed of two parts:
 *
 * - The application-oriented Event API that includes functions to set up
 *   an event device (configure it, set up its queues and ports, and start it), to
 *   establish the links between queues and ports to receive events, and so on.
 *
 * - The driver-oriented Event API that exports a function allowing
 *   an event Poll Mode Driver (PMD) to register itself as
 *   an event device driver.
 *
 * Application-oriented Event API
 * ------------------------------
 *
 * Event device components:
 *
 *                     +-----------------+
 *                     | +-------------+ |
 *        +-------+    | |    flow 0   | |
 *        |Packet |    | +-------------+ |
 *        |event  |    | +-------------+ |
 *        |       |    | |    flow 1   | |port_link(port0, queue0)
 *        +-------+    | +-------------+ |     |     +--------+
 *        +-------+    | +-------------+ o-----v-----o        |dequeue +------+
 *        |Crypto |    | |    flow n   | |           | event  +------->|Core 0|
 *        |work   |    | +-------------+ o----+      | port 0 |        |      |
 *        |done ev|    |  event queue 0  |    |      +--------+        +------+
 *        +-------+    +-----------------+    |
 *        +-------+                           |
 *        |Timer  |    +-----------------+    |      +--------+
 *        |expiry |    | +-------------+ |    +------o        |dequeue +------+
 *        |event  |    | |    flow 0   | o-----------o event  +------->|Core 1|
 *        +-------+    | +-------------+ |      +----o port 1 |        |      |
 *       Event enqueue | +-------------+ |      |    +--------+        +------+
 *     o-------------> | |    flow 1   | |      |
 *        enqueue(     | +-------------+ |      |
 *        queue_id,    |                 |      |    +--------+        +------+
 *        flow_id,     | +-------------+ |      |    |        |dequeue |Core 2|
 *        sched_type,  | |    flow n   | o-----------o event  +------->|      |
 *        event_type,  | +-------------+ |      |    | port 2 |        +------+
 *        subev_type,  |  event queue 1  |      |    +--------+
 *        event)       +-----------------+      |    +--------+
 *                                              |    |        |dequeue +------+
 *        +-------+    +-----------------+      |    | event  +------->|Core n|
 *        |Core   |    | +-------------+ o-----------o port n |        |      |
 *        |(SW)   |    | |    flow 0   | |      |    +--------+        +--+---+
 *        |event  |    | +-------------+ |      |                         |
 *        +-------+    | +-------------+ |      |                         |
 *            ^        | |    flow 1   | |      |                         |
 *            |        | +-------------+ o------+                         |
 *            |        | +-------------+ |                                |
 *            |        | |    flow n   | |                                |
 *            |        | +-------------+ |                                |
 *            |        |  event queue n  |                                |
 *            |        +-----------------+                                |
 *            |                                                           |
 *            +-----------------------------------------------------------+
 *
 * **Event device**: A hardware or software-based event scheduler.
 *
 * **Event**: Represents an item of work and is the smallest unit of scheduling.
 * An event carries metadata, such as queue ID, scheduling type, and event priority,
 * and data such as one or more packets or other kinds of buffers.
 * Some examples of events are:
 * - a software-generated item of work originating from an lcore,
 *   perhaps carrying a packet to be processed.
 * - a crypto work completion notification.
 * - a timer expiry notification.
 *
 * **Event queue**: A queue containing events that are to be scheduled by the event device.
 * An event queue contains events of different flows associated with scheduling
 * types, such as atomic, ordered, or parallel.
 * Each event given to an event device must have a valid event queue id field in the metadata,
 * to specify on which event queue in the device the event must be placed,
 * for later scheduling.
 *
 * **Event port**: An application's interface into the event dev for enqueue and
 * dequeue operations. Each event port can be linked with one or more
 * event queues for dequeue operations.
 * Enqueue and dequeue on a port are not thread-safe, and the expected use-case is
 * that each port is polled by only a single lcore. [If this is not the case,
 * a suitable synchronization mechanism should be used to prevent simultaneous
 * access from multiple lcores.]
 * To schedule events to an lcore, the event device will schedule them to the event port(s)
 * being polled by that lcore.
 *
 * *NOTE*: By default, all the functions of the Event Device API exported by a PMD
 * are non-thread-safe functions, which must not be invoked on the same object in parallel on
 * different logical cores.
 * For instance, the dequeue function of a PMD cannot be invoked in parallel on two logical
 * cores to operate on the same event port. Of course, this function
 * can be invoked in parallel by different logical cores on different ports.
 * It is the responsibility of the upper level application to enforce this rule.
 *
 * In all functions of the Event API, the Event device is
 * designated by an integer >= 0 named the device identifier *dev_id*.
 *
 * The functions exported by the application Event API to set up a device
 * must be invoked in the following order (see the sketch after this list):
 *     - rte_event_dev_configure()
 *     - rte_event_queue_setup()
 *     - rte_event_port_setup()
 *     - rte_event_port_link()
 *     - rte_event_dev_start()
 *
 * Then, the application can invoke, in any order, the functions
 * exported by the Event API to dequeue events, enqueue events,
 * and link and unlink event queue(s) to event ports.
 *
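 * As a minimal, hedged sketch of this call order (one queue and one port,
 * error checking omitted; *dev_id* is assumed to be a valid device identifier
 * and the configuration values shown are only illustrative):
 *
 * \code{.c}
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config conf = {0};
 *	struct rte_event_queue_conf qconf;
 *	struct rte_event_port_conf pconf;
 *	uint8_t queue_id = 0;
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	conf.nb_event_queues = 1;
 *	conf.nb_event_ports = 1;
 *	conf.nb_events_limit = info.max_num_events;
 *	conf.nb_event_queue_flows = info.max_event_queue_flows;
 *	conf.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	conf.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	conf.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	rte_event_dev_configure(dev_id, &conf);
 *
 *	rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
 *	rte_event_queue_setup(dev_id, queue_id, &qconf);
 *
 *	rte_event_port_default_conf_get(dev_id, 0, &pconf);
 *	rte_event_port_setup(dev_id, 0, &pconf);
 *
 *	rte_event_port_link(dev_id, 0, &queue_id, NULL, 1);
 *	rte_event_dev_start(dev_id);
 * \endcode
 *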
 * Before configuring a device, an application should call rte_event_dev_info_get()
 * to determine the capabilities of the event device, and any queue or port
 * limits of that device. The parameters set in the various device configuration
 * structures may need to be adjusted based on the max values provided in the
 * device information structure returned from the rte_event_dev_info_get() API.
 * An application may use rte_event_queue_default_conf_get() or
 * rte_event_port_default_conf_get() to get the default configuration
 * to set up an event queue or event port by overriding a few default values.
 *
 * If the application wants to change the configuration (i.e. call
 * rte_event_dev_configure(), rte_event_queue_setup(), or
 * rte_event_port_setup()), it must call rte_event_dev_stop() first to stop the
 * device and then do the reconfiguration before calling rte_event_dev_start()
 * again. The schedule, enqueue and dequeue functions should not be invoked
 * when the device is stopped.
 *
 * Finally, an application can close an Event device by invoking the
 * rte_event_dev_close() function. Once closed, a device cannot be
 * reconfigured or restarted.
 *
 * Driver-Oriented Event API
 * -------------------------
 *
 * At the Event driver level, Event devices are represented by a generic
 * data structure of type *rte_event_dev*.
 *
 * Event devices are dynamically registered during the PCI/SoC device probing
 * phase performed at EAL initialization time.
 * When an Event device is being probed, an *rte_event_dev* structure is allocated
 * for it and the event_dev_init() function supplied by the Event driver
 * is invoked to properly initialize the device.
 *
 * The role of the device init function is to reset the device hardware or
 * to initialize the software event driver implementation.
 *
 * If the device init operation is successful, the device is assigned a device
 * id (dev_id) for application use.
 * Otherwise, the *rte_event_dev* structure is freed.
 *
 * Each function of the application Event API invokes a specific function
 * of the PMD that controls the target device designated by its device
 * identifier.
 *
 * For this purpose, all device-specific functions of an Event driver are
 * supplied through a set of pointers contained in a generic structure of type
 * *event_dev_ops*.
 * The address of the *event_dev_ops* structure is stored in the *rte_event_dev*
 * structure by the device init function of the Event driver, which is
 * invoked during the PCI/SoC device probing phase, as explained earlier.
 *
 * In other words, each function of the Event API simply retrieves the
 * *rte_event_dev* structure associated with the device identifier and
 * performs an indirect invocation of the corresponding driver function
 * supplied in the *event_dev_ops* structure of the *rte_event_dev* structure.
 *
 * For performance reasons, the addresses of the fast-path functions of the
 * event driver are not contained in the *event_dev_ops* structure.
 * Instead, they are directly stored at the beginning of the *rte_event_dev*
 * structure to avoid an extra indirect memory access during their invocation.
 *
 * Event Enqueue, Dequeue and Scheduling
 * -------------------------------------
 *
 * RTE event device drivers do not use interrupts for enqueue or dequeue
 * operations. Instead, Event drivers export Poll-Mode enqueue and dequeue
 * functions to applications.
 *
 * Events are injected into the event device through the *enqueue* operation by
 * event producers in the system. Typical event producers are the ethdev
 * subsystem generating packet events, the CPU (SW) generating events based
 * on different stages of application processing, and cryptodev generating
 * crypto work completion notifications.
 *
 * The *dequeue* operation gets one or more events from the event ports.
 * The application processes the events and sends them to a downstream event queue through
 * rte_event_enqueue_burst(), if it is an intermediate stage of event processing.
 * On the final stage of processing, the application may use the Tx adapter API for maintaining
 * the event ingress order while sending the packet/event on the wire via NIC Tx.
 *
 * The point at which events are scheduled to ports depends on the device.
 * For hardware devices, scheduling occurs asynchronously without any software
 * intervention. Software schedulers can either be distributed
 * (each worker thread schedules events to its own port) or centralized
 * (a dedicated thread schedules to all ports). Distributed software schedulers
 * perform the scheduling inside the enqueue or dequeue functions, whereas centralized
 * software schedulers need a dedicated service core for scheduling.
 * The absence of the RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag
 * indicates that the device is centralized and thus needs a dedicated scheduling
 * thread (generally an RTE service that should be mapped to one or more service cores)
 * that repeatedly calls the device-specific scheduling function, as sketched below.
 *
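 * For such a centralized scheduler, a hedged sketch of mapping the scheduling
 * service to a service core (assuming lcore 1 was made a service core via the
 * EAL options or rte_service_lcore_add(), and that the rte_service.h APIs are
 * available):
 *
 * \code{.c}
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		// Device needs a service core: map and run the service.
 *		rte_service_map_lcore_set(service_id, 1, 1);
 *		rte_service_runstate_set(service_id, 1);
 *		rte_service_lcore_start(1);
 *	}
 * \endcode
 *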
 * An event-driven worker thread has the following typical workflow on the fast path:
 * \code{.c}
 *	while (1) {
 *		rte_event_dequeue_burst(...);
 *		(event processing)
 *		rte_event_enqueue_burst(...);
 *	}
 * \endcode
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "rte_eventdev_trace_fp.h"

struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
struct rte_event;

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS           (1ULL << 0)
/**< Event scheduling prioritization is based on the priority and weight
 * associated with each event queue. Events from a queue with the highest
 * priority are scheduled first. If the queues are of the same priority, the
 * weights of the queues are considered to select a queue in a weighted round
 * robin fashion.
 * Subsequent dequeue calls from an event port could see events from the same
 * event queue, if the queue is configured with an affinity count. The affinity
 * count is the number of subsequent dequeue calls in which an event port
 * should use the same event queue, if the queue is non-empty.
 *
 *  @see rte_event_queue_setup(), rte_event_queue_attr_set()
 */
#define RTE_EVENT_DEV_CAP_EVENT_QOS           (1ULL << 1)
/**< Event scheduling prioritization is based on the priority associated with
 *  each event. The priority of each event is supplied in the *rte_event*
 *  structure on each enqueue operation.
 *
 *  @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED   (1ULL << 2)
/**< Event device operates in distributed scheduling mode.
 * In distributed scheduling mode, event scheduling happens in HW, in
 * rte_event_dequeue_burst(), or in a combination of the two.
 * If the flag is not set, then the eventdev is centralized and thus needs a
 * dedicated service core that acts as a scheduling thread.
 *
 * @see rte_event_dequeue_burst()
 */
#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES     (1ULL << 3)
/**< Event device is capable of accepting enqueued events, of any type
 * advertised as supported by the device, to all destination queues.
 *
 * When this capability is set, the "schedule_type" field of the
 * rte_event_queue_conf structure is ignored when a queue is being configured.
 * Instead the "sched_type" field of each event enqueued is used to
 * select the scheduling to be performed on that event.
 *
 * If this capability is not set, the queue only supports events of the
 * *RTE_SCHED_TYPE_* type specified in the rte_event_queue_conf structure
 * at time of configuration.
 *
 * @see RTE_SCHED_TYPE_ATOMIC
 * @see RTE_SCHED_TYPE_ORDERED
 * @see RTE_SCHED_TYPE_PARALLEL
 * @see rte_event_queue_conf.schedule_type
 */
#define RTE_EVENT_DEV_CAP_BURST_MODE          (1ULL << 4)
/**< Event device is capable of operating in burst mode for enqueue (forward,
 * release) and dequeue operations. If this capability is not set, the
 * application still uses rte_event_dequeue_burst() and
 * rte_event_enqueue_burst(), but the PMD accepts only one event at a time.
 *
 * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
 */
#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE    (1ULL << 5)
/**< Event device ports support disabling the implicit release feature, in
 * which the port will release all unreleased events in its dequeue operation.
 * If this capability is set and the port is configured with implicit release
 * disabled, the application is responsible for explicitly releasing events
 * using either the RTE_EVENT_OP_FORWARD or the RTE_EVENT_OP_RELEASE event
 * enqueue operations.
 *
 * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
 */

#define RTE_EVENT_DEV_CAP_NONSEQ_MODE         (1ULL << 6)
/**< Event device is capable of operating in non-sequential mode. The path
 * of the event need not be sequential, and the application can change
 * the path of an event at runtime. If the flag is set, events may be sent
 * to queues in any order. If the flag is not set, then each event will
 * follow a path from queue 0 to queue 1 to queue 2, etc., and the
 * eventdev will return an error when the application enqueues an event for a
 * qid which is not the next in the sequence.
 */

#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK   (1ULL << 7)
/**< Event device is capable of configuring the queue/port link at runtime.
 * If the flag is not set, the eventdev queue/port link can only be
 * configured during initialization.
 */

#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
/**< Event device is capable of setting up links between multiple queues and
 * a single port. If the flag is not set, the eventdev can only map a
 * single queue to each port, or map a single queue to many ports.
 */

#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
/**< Event device preserves the flow ID from the enqueued
 * event to the dequeued event if the flag is set. Otherwise,
 * the content of this field is implementation dependent.
 */

#define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)
/**< Event device *does not* require calls to rte_event_maintain().
 * An event device that does not set this flag requires calls to
 * rte_event_maintain() during periods when neither
 * rte_event_dequeue_burst() nor rte_event_enqueue_burst() are called
 * on a port. This will allow the event device to perform internal
 * processing, such as flushing buffered events, returning credits to a
 * global pool, or processing signaling related to load balancing.
 */

#define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11)
/**< Event device is capable of changing the queue attributes at runtime, i.e.
 * after the rte_event_queue_setup() or rte_event_dev_start() call sequence. If
 * this flag is not set, eventdev queue attributes can only be configured during
 * rte_event_queue_setup().
 */

#define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12)
/**< Event device is capable of supporting multiple link profiles per event port
 * i.e., the value of `rte_event_dev_info::max_profiles_per_port` is greater
 * than one.
 */

#define RTE_EVENT_DEV_CAP_ATOMIC  (1ULL << 13)
/**< Event device is capable of atomic scheduling.
 * When this flag is set, the application can configure queues with scheduling type
 * atomic on this event device.
 * @see RTE_SCHED_TYPE_ATOMIC
 */

#define RTE_EVENT_DEV_CAP_ORDERED  (1ULL << 14)
/**< Event device is capable of ordered scheduling.
 * When this flag is set, the application can configure queues with scheduling type
 * ordered on this event device.
 * @see RTE_SCHED_TYPE_ORDERED
 */

#define RTE_EVENT_DEV_CAP_PARALLEL  (1ULL << 15)
/**< Event device is capable of parallel scheduling.
 * When this flag is set, the application can configure queues with scheduling type
 * parallel on this event device.
 * @see RTE_SCHED_TYPE_PARALLEL
 */

/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST   0
/**< Highest priority expressed across eventdev subsystem
 * @see rte_event_queue_setup(), rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */
#define RTE_EVENT_DEV_PRIORITY_NORMAL    128
/**< Normal priority expressed across eventdev subsystem
 * @see rte_event_queue_setup(), rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */
#define RTE_EVENT_DEV_PRIORITY_LOWEST    255
/**< Lowest priority expressed across eventdev subsystem
 * @see rte_event_queue_setup(), rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */

/* Event queue scheduling weights */
#define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255
/**< Highest weight of an event queue
 * @see rte_event_queue_attr_get(), rte_event_queue_attr_set()
 */
#define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0
/**< Lowest weight of an event queue
 * @see rte_event_queue_attr_get(), rte_event_queue_attr_set()
 */

/* Event queue scheduling affinity */
#define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255
/**< Highest scheduling affinity of an event queue
 * @see rte_event_queue_attr_get(), rte_event_queue_attr_set()
 */
#define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0
/**< Lowest scheduling affinity of an event queue
 * @see rte_event_queue_attr_get(), rte_event_queue_attr_set()
 */

/**
 * Get the total number of event devices that have been successfully
 * initialised.
 *
 * @return
 *   The total number of usable event devices.
 */
uint8_t
rte_event_dev_count(void);

/**
 * Get the device identifier for the named event device.
 *
 * @param name
 *   Event device name to select the event device identifier.
 *
 * @return
 *   Returns event device identifier on success.
 *   - <0: Failure to find named event device.
 */
int
rte_event_dev_get_dev_id(const char *name);

/**
 * Return the NUMA socket to which a device is connected.
 *
 * @param dev_id
 *   The identifier of the device.
 * @return
 *   The NUMA socket id to which the device is connected or
 *   a default of zero if the socket could not be determined.
 *   - -EINVAL: dev_id value is out of range.
 */
int
rte_event_dev_socket_id(uint8_t dev_id);

/**
 * Event device information
 */
struct rte_event_dev_info {
	const char *driver_name;	/**< Event driver name */
	struct rte_device *dev;	/**< Device information */
	uint32_t min_dequeue_timeout_ns;
	/**< Minimum global dequeue timeout (ns) supported by this device */
	uint32_t max_dequeue_timeout_ns;
	/**< Maximum global dequeue timeout (ns) supported by this device */
	uint32_t dequeue_timeout_ns;
	/**< Configured global dequeue timeout (ns) for this device */
	uint8_t max_event_queues;
	/**< Maximum number of event queues supported by this device */
	uint32_t max_event_queue_flows;
	/**< Maximum number of flows supported in an event queue by this device */
	uint8_t max_event_queue_priority_levels;
	/**< Maximum number of event queue priority levels supported by this device.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
	 */
	uint8_t max_event_priority_levels;
	/**< Maximum number of event priority levels supported by this device.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_EVENT_QOS capability.
	 */
	uint8_t max_event_ports;
	/**< Maximum number of event ports supported by this device */
	uint8_t max_event_port_dequeue_depth;
	/**< Maximum number of events that can be dequeued at a time from an
	 * event port on this device.
	 * A device that does not support bulk dequeue will set this as 1.
	 */
	uint32_t max_event_port_enqueue_depth;
	/**< Maximum number of events that can be enqueued at a time to an
	 * event port on this device.
	 * A device that does not support bulk enqueue will set this as 1.
	 */
	uint8_t max_event_port_links;
	/**< Maximum number of queues that can be linked to a single event
	 * port on this device.
	 */
	int32_t max_num_events;
	/**< A *closed system* event dev has a limit on the number of events it
	 * can manage at a time. An *open system* event dev does not have a
	 * limit and will specify this as -1.
	 */
	uint32_t event_dev_cap;
	/**< Event device capabilities (RTE_EVENT_DEV_CAP_) */
	uint8_t max_single_link_event_port_queue_pairs;
	/**< Maximum number of event ports and queues that are optimized for
	 * (and only capable of) single-link configurations supported by this
	 * device. These ports and queues are not accounted for in
	 * max_event_ports or max_event_queues.
	 */
	uint8_t max_profiles_per_port;
	/**< Maximum number of event queue profiles per event port.
	 * A device that doesn't support multiple profiles will set this as 1.
	 */
};

/**
 * Retrieve the contextual information of an event device.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param[out] dev_info
 *   A pointer to a structure of type *rte_event_dev_info* to be filled with the
 *   contextual information of the device.
 *
 * @return
 *   - 0: Success, driver updates the contextual information of the event device
 *   - <0: Error code returned by the driver info get function.
 */
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);

/**
 * The count of ports.
 */
#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
/**
 * The count of queues.
 */
#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
/**
 * The status of the device, zero for stopped, non-zero for started.
 */
#define RTE_EVENT_DEV_ATTR_STARTED 2

/**
 * Get an attribute from a device.
 *
 * @param dev_id Eventdev id
 * @param attr_id The attribute ID to retrieve
 * @param[out] attr_value A pointer that will be filled in with the attribute
 *             value if successful.
 *
 * @return
 *   - 0: Successfully retrieved attribute value
 *   - -EINVAL: Invalid device or *attr_id* provided, or *attr_value* is NULL
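 *
 * A minimal usage sketch:
 * \code{.c}
 *	uint32_t nb_ports;
 *
 *	if (rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_PORT_COUNT,
 *				   &nb_ports) == 0)
 *		printf("ports configured: %u\n", nb_ports);
 * \endcode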
 */
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value);


/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
/**< Override the global *dequeue_timeout_ns* and use per dequeue timeout in ns.
 *  @see rte_event_dequeue_timeout_ticks(), rte_event_dequeue_burst()
 */

/** Event device configuration structure */
struct rte_event_dev_config {
	uint32_t dequeue_timeout_ns;
	/**< rte_event_dequeue_burst() timeout on this device.
	 * This value should be in the range of *min_dequeue_timeout_ns* and
	 * *max_dequeue_timeout_ns* which were previously provided in
	 * rte_event_dev_info_get().
	 * The value 0 is allowed, in which case the default dequeue timeout is used.
	 * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
	 */
	int32_t nb_events_limit;
	/**< In a *closed system* this field is the limit on the maximum number of
	 * events that can be inflight in the eventdev at a given time. The
	 * limit is required to ensure that the finite space in a closed system
	 * is not overwhelmed. The value cannot exceed the *max_num_events*
	 * as provided by rte_event_dev_info_get().
	 * This value should be set to -1 for an *open system*.
	 */
	uint8_t nb_event_queues;
	/**< Number of event queues to configure on this device.
	 * This value cannot exceed the *max_event_queues* which was previously
	 * provided in rte_event_dev_info_get().
	 */
	uint8_t nb_event_ports;
	/**< Number of event ports to configure on this device.
	 * This value cannot exceed the *max_event_ports* which was previously
	 * provided in rte_event_dev_info_get().
	 */
	uint32_t nb_event_queue_flows;
	/**< Number of flows for any event queue on this device.
	 * This value cannot exceed the *max_event_queue_flows* which was
	 * previously provided in rte_event_dev_info_get().
	 */
	uint32_t nb_event_port_dequeue_depth;
	/**< Maximum number of events that can be dequeued at a time from an
	 * event port on this device.
	 * This value cannot exceed the *max_event_port_dequeue_depth*
	 * which was previously provided in rte_event_dev_info_get().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 * @see rte_event_port_setup()
	 */
	uint32_t nb_event_port_enqueue_depth;
	/**< Maximum number of events that can be enqueued at a time to an
	 * event port on this device.
	 * This value cannot exceed the *max_event_port_enqueue_depth*
	 * which was previously provided in rte_event_dev_info_get().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 * @see rte_event_port_setup()
	 */
	uint32_t event_dev_cfg;
	/**< Event device config flags (RTE_EVENT_DEV_CFG_) */
	uint8_t nb_single_link_event_port_queues;
	/**< Number of event ports and queues that will be singly-linked to
	 * each other. These are a subset of the overall event ports and
	 * queues; this value cannot exceed *nb_event_ports* or
	 * *nb_event_queues*. If the device has ports and queues that are
	 * optimized for single-link usage, this field is a hint for how many
	 * to allocate; otherwise, regular event ports and queues can be used.
	 */
};

/**
 * Configure an event device.
 *
 * This function must be invoked before any other function in the
 * API. It can also be re-invoked when a device is in the
 * stopped state.
 *
 * The caller may use rte_event_dev_info_get() to get the capabilities and
 * resource limits of this event device.
 *
 * @param dev_id
 *   The identifier of the device to configure.
 * @param dev_conf
 *   The event device configuration structure.
 *
 * @return
 *   - 0: Success, device configured.
 *   - <0: Error code returned by the driver configuration function.
 */
int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf);

/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES          (1ULL << 0)
/**< Allow enqueuing events with the ATOMIC, ORDERED, or PARALLEL schedule types.
 *
 * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
 * @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK        (1ULL << 1)
/**< This event queue links only to a single event port.
 *
 *  @see rte_event_port_setup(), rte_event_port_link()
 */

/** Event queue configuration structure */
struct rte_event_queue_conf {
	uint32_t nb_atomic_flows;
	/**< The maximum number of active flows this queue can track at any
	 * given time. If the queue is configured for atomic scheduling (by
	 * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg
	 * or the RTE_SCHED_TYPE_ATOMIC flag to schedule_type), then the
	 * value must be in the range of [1, nb_event_queue_flows], which was
	 * previously provided in rte_event_dev_configure().
	 */
	uint32_t nb_atomic_order_sequences;
	/**< The maximum number of outstanding events waiting to be
	 * reordered by this queue. In other words, the number of entries in
	 * this queue's reorder buffer. When the number of events in the
	 * reorder buffer reaches *nb_atomic_order_sequences*, the
	 * scheduler cannot schedule the events from this queue, and an invalid
	 * event will be returned from dequeue until one or more entries are
	 * freed up/released.
	 * If the queue is configured for ordered scheduling (by applying the
	 * RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg or the
	 * RTE_SCHED_TYPE_ORDERED flag to schedule_type), then the value must
	 * be in the range of [1, nb_event_queue_flows], which was
	 * previously supplied to rte_event_dev_configure().
	 */
	uint32_t event_queue_cfg;
	/**< Queue cfg flags (EVENT_QUEUE_CFG_) */
	uint8_t schedule_type;
	/**< Queue schedule type (RTE_SCHED_TYPE_*).
	 * Valid when the RTE_EVENT_QUEUE_CFG_ALL_TYPES bit is not set in
	 * event_queue_cfg.
	 */
	uint8_t priority;
	/**< Priority for this event queue relative to other event queues.
	 * The requested priority should be in the range of
	 * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
	 * The implementation shall normalize the requested priority to the
	 * event device supported priority value.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
	 */
	uint8_t weight;
	/**< Weight of the event queue relative to other event queues.
	 * The requested weight should be in the range of
	 * [RTE_EVENT_QUEUE_WEIGHT_HIGHEST, RTE_EVENT_QUEUE_WEIGHT_LOWEST].
	 * The implementation shall normalize the requested weight to the event
	 * device supported weight value.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
	 */
	uint8_t affinity;
	/**< Affinity of the event queue relative to other event queues.
	 * The requested affinity should be in the range of
	 * [RTE_EVENT_QUEUE_AFFINITY_HIGHEST, RTE_EVENT_QUEUE_AFFINITY_LOWEST].
	 * The implementation shall normalize the requested affinity to the event
	 * device supported affinity value.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
	 */
};

/**
 * Retrieve the default configuration information of an event queue designated
 * by its *queue_id* from the event driver for an event device.
 *
 * This function is intended to be used in conjunction with rte_event_queue_setup()
 * where the caller needs to set up the queue by overriding a few default values.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the event queue to get the configuration information.
 *   The value must be in the range [0, nb_event_queues - 1]
 *   previously supplied to rte_event_dev_configure().
 * @param[out] queue_conf
 *   The pointer to the default event queue configuration data.
 * @return
 *   - 0: Success, driver updates the default event queue configuration data.
 *   - <0: Error code returned by the driver info get function.
 *
 * @see rte_event_queue_setup()
 */
int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf);

/**
 * Allocate and set up an event queue for an event device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the event queue to setup. The value must be in the range
 *   [0, nb_event_queues - 1] previously supplied to rte_event_dev_configure().
 * @param queue_conf
 *   The pointer to the configuration data to be used for the event queue.
 *   A NULL value is allowed, in which case the default configuration is used.
 *
 * @see rte_event_queue_default_conf_get()
 *
 * @return
 *   - 0: Success, event queue correctly set up.
 *   - <0: event queue configuration failed
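 *
 * A minimal sketch, overriding one default value (assumes the device was
 * configured with at least one queue):
 * \code{.c}
 *	struct rte_event_queue_conf conf;
 *
 *	rte_event_queue_default_conf_get(dev_id, 0, &conf);
 *	conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
 *	rte_event_queue_setup(dev_id, 0, &conf);
 * \endcode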
 */
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf);

/**
 * The priority of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
/**
 * The number of atomic flows configured for the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
/**
 * The number of atomic order sequences configured for the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
/**
 * The cfg flags for the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
/**
 * The schedule type of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
/**
 * The weight of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_WEIGHT 5
/**
 * Affinity of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_AFFINITY 6

/**
 * Get an attribute from a queue.
 *
 * @param dev_id
 *   Eventdev id
 * @param queue_id
 *   Eventdev queue id
 * @param attr_id
 *   The attribute ID to retrieve
 * @param[out] attr_value
 *   A pointer that will be filled in with the attribute value if successful
 *
 * @return
 *   - 0: Successfully returned value
 *   - -EINVAL: invalid device, queue or attr_id provided, or attr_value was
 *		NULL
 *   - -EOVERFLOW: returned when attr_id is set to
 *   RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and event_queue_cfg is set to
 *   RTE_EVENT_QUEUE_CFG_ALL_TYPES
 */
int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value);

/**
 * Set an event queue attribute.
 *
 * @param dev_id
 *   Eventdev id
 * @param queue_id
 *   Eventdev queue id
 * @param attr_id
 *   The attribute ID to set
 * @param attr_value
 *   The attribute value to set
 *
 * @return
 *   - 0: Successfully set attribute.
 *   - -EINVAL: invalid device, queue or attr_id.
 *   - -ENOTSUP: device does not support setting the event attribute.
 *   - <0: failed to set event queue attribute
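 *
 * A hedged sketch of a runtime weight change (valid only if the device
 * reports the RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR capability):
 * \code{.c}
 *	rte_event_queue_attr_set(dev_id, queue_id, RTE_EVENT_QUEUE_ATTR_WEIGHT,
 *				 RTE_EVENT_QUEUE_WEIGHT_HIGHEST);
 * \endcode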
 */
int
rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			 uint64_t attr_value);

/* Event port specific APIs */

/* Event port configuration bitmap flags */
#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)
/**< Configure the port not to release outstanding events in
 * rte_event_dequeue_burst(). If set, all events received through
 * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
 * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
 * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
 */
#define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)
/**< This event port links only to a single event queue.
 *
 *  @see rte_event_port_setup(), rte_event_port_link()
 */
#define RTE_EVENT_PORT_CFG_HINT_PRODUCER       (1ULL << 2)
/**< Hint that this event port will primarily enqueue events to the system.
 * A PMD can optimize its internal workings by assuming that this port is
 * primarily going to enqueue NEW events.
 *
 * Note that this flag is only a hint, so PMDs must operate under the
 * assumption that any port can enqueue an event with any type of op.
 *
 *  @see rte_event_port_setup()
 */
#define RTE_EVENT_PORT_CFG_HINT_CONSUMER       (1ULL << 3)
/**< Hint that this event port will primarily dequeue events from the system.
 * A PMD can optimize its internal workings by assuming that this port is
 * primarily going to consume events, and not enqueue FORWARD or RELEASE
 * events.
 *
 * Note that this flag is only a hint, so PMDs must operate under the
 * assumption that any port can enqueue an event with any type of op.
 *
 *  @see rte_event_port_setup()
 */
#define RTE_EVENT_PORT_CFG_HINT_WORKER         (1ULL << 4)
/**< Hint that this event port will primarily pass existing events through.
 * A PMD can optimize its internal workings by assuming that this port is
 * primarily going to FORWARD events, and not enqueue NEW or RELEASE events
 * often.
 *
 * Note that this flag is only a hint, so PMDs must operate under the
 * assumption that any port can enqueue an event with any type of op.
 *
 *  @see rte_event_port_setup()
 */

/** Event port configuration structure */
struct rte_event_port_conf {
	int32_t new_event_threshold;
	/**< A backpressure threshold for new event enqueues on this port.
	 * Use for *closed system* event devs where event capacity is limited,
	 * and cannot exceed the capacity of the event dev.
	 * Configuring ports with different thresholds can make higher priority
	 * traffic less likely to be backpressured.
	 * For example, a port used to inject NIC Rx packets into the event dev
	 * can have a lower threshold so as not to overwhelm the device,
	 * while ports used for worker pools can have a higher threshold.
	 * This value cannot exceed the *nb_events_limit*
	 * which was previously supplied to rte_event_dev_configure().
	 * This should be set to '-1' for an *open system*.
	 */
	uint16_t dequeue_depth;
	/**< Configure the number of bulk dequeues for this event port.
	 * This value cannot exceed the *nb_event_port_dequeue_depth*
	 * which was previously supplied to rte_event_dev_configure().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 */
	uint16_t enqueue_depth;
	/**< Configure the number of bulk enqueues for this event port.
	 * This value cannot exceed the *nb_event_port_enqueue_depth*
	 * which was previously supplied to rte_event_dev_configure().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 */
	uint32_t event_port_cfg; /**< Port cfg flags (EVENT_PORT_CFG_) */
};

/**
 * Retrieve the default configuration information of an event port designated
 * by its *port_id* from the event driver for an event device.
 *
 * This function is intended to be used in conjunction with rte_event_port_setup()
 * where the caller needs to set up the port by overriding a few default values.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The index of the event port to get the configuration information.
 *   The value must be in the range [0, nb_event_ports - 1]
 *   previously supplied to rte_event_dev_configure().
 * @param[out] port_conf
 *   The pointer to the default event port configuration data
 * @return
 *   - 0: Success, driver updates the default event port configuration data.
 *   - <0: Error code returned by the driver info get function.
 *
 * @see rte_event_port_setup()
 */
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				struct rte_event_port_conf *port_conf);

/**
 * Allocate and set up an event port for an event device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The index of the event port to setup. The value must be in the range
 *   [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
 * @param port_conf
 *   The pointer to the configuration data to be used for the port.
 *   A NULL value is allowed, in which case the default configuration is used.
 *
 * @see rte_event_port_default_conf_get()
 *
 * @return
 *   - 0: Success, event port correctly set up.
 *   - <0: Port configuration failed
 *   - (-EDQUOT) Quota exceeded (application tried to link a queue configured
 *   with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
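 *
 * A minimal sketch, lowering the new-event threshold from the default:
 * \code{.c}
 *	struct rte_event_port_conf conf;
 *
 *	rte_event_port_default_conf_get(dev_id, 0, &conf);
 *	conf.new_event_threshold = conf.new_event_threshold / 2;
 *	rte_event_port_setup(dev_id, 0, &conf);
 * \endcode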
 */
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf);

typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
					  struct rte_event event, void *arg);
/**< Callback function prototype that can be passed to
 * rte_event_port_quiesce(), invoked once per released event.
 */

/**
 * Quiesce any core specific resources consumed by the event port.
 *
 * Event ports are generally coupled with lcores, and a given hardware
 * implementation might require the PMD to store port specific data in the
 * lcore.
 * When the application decides to migrate the event port to another lcore
 * or tear down the current lcore, it can call `rte_event_port_quiesce`
 * to make sure that all the data associated with the event port are released
 * from the lcore; this might also include any prefetched events.
 * While releasing the event port from the lcore, this function calls the
 * user-provided flush callback once per event.
 *
 * @note Invocation of this API does not affect the existing port configuration.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The index of the event port to quiesce. The value must be in the range
 *   [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
 * @param release_cb
 *   Callback function invoked once per flushed event.
 * @param args
 *   Argument supplied to callback.
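 *
 * A hedged sketch with a hypothetical flush callback, assuming all flushed
 * events carry mbufs:
 * \code{.c}
 *	static void
 *	flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
 *	{
 *		rte_pktmbuf_free(ev.mbuf);	// assumption: events hold mbufs
 *	}
 *
 *	rte_event_port_quiesce(dev_id, port_id, flush_cb, NULL);
 * \endcode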
 */
void
rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
		       rte_eventdev_port_flush_t release_cb, void *args);

/**
 * The queue depth of the port on the enqueue side
 */
#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
/**
 * The queue depth of the port on the dequeue side
 */
#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
/**
 * The new event threshold of the port
 */
#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
/**
 * The implicit release disable attribute of the port
 */
#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3

/**
 * Get an attribute from a port.
 *
 * @param dev_id
 *   Eventdev id
 * @param port_id
 *   Eventdev port id
 * @param attr_id
 *   The attribute ID to retrieve
 * @param[out] attr_value
 *   A pointer that will be filled in with the attribute value if successful
 *
 * @return
 *   - 0: Successfully returned value
 *   - (-EINVAL) Invalid device, port or attr_id, or attr_value was NULL
 */
int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value);

/**
 * Start an event device.
 *
 * The device start step is the last one and consists of setting the event
 * queues to start accepting events and scheduling them to event ports.
 *
 * On success, all basic functions exported by the API (event enqueue,
 * event dequeue and so on) can be invoked.
 *
 * @param dev_id
 *   Event device identifier
 * @return
 *   - 0: Success, device started.
 *   - -ESTALE : Not all ports of the device are configured
 *   - -ENOLINK: Not all queues are linked, which could lead to deadlock.
 */
int
rte_event_dev_start(uint8_t dev_id);

/**
 * Stop an event device.
 *
 * This function causes all queued events to be drained, including those
 * residing in event ports. While draining events out of the device, this
 * function calls the user-provided flush callback (if one was registered) once
 * per event.
 *
 * The device can be restarted with a call to rte_event_dev_start(). Threads
 * that continue to enqueue/dequeue while the device is stopped, or being
 * stopped, will result in undefined behavior. This includes event adapters,
 * which must be stopped prior to stopping the eventdev.
 *
 * @param dev_id
 *   Event device identifier.
 *
 * @see rte_event_dev_stop_flush_callback_register()
 */
void
rte_event_dev_stop(uint8_t dev_id);

typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
					  struct rte_event event, void *arg);
/**< Callback function called during rte_event_dev_stop(), invoked once per
 * flushed event.
 */

/**
 * Registers a callback function to be invoked during rte_event_dev_stop() for
 * each flushed event. This function can be used to properly dispose of queued
 * events, for example events containing memory pointers.
 *
 * The callback function is only registered for the calling process. The
 * callback function must be registered in every process that can call
 * rte_event_dev_stop().
 *
 * To unregister a callback, call this function with a NULL callback pointer.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param callback
 *   Callback function invoked once per flushed event.
 * @param userdata
 *   Argument supplied to callback.
 *
 * @return
 *  - 0 on success.
 *  - -EINVAL if *dev_id* is invalid
 *
 * @see rte_event_dev_stop()
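 *
 * A minimal sketch (reusing a flush callback like the one shown for
 * rte_event_port_quiesce() above, since the two callback types share the
 * same signature):
 * \code{.c}
 *	rte_event_dev_stop_flush_callback_register(dev_id, flush_cb, NULL);
 *	rte_event_dev_stop(dev_id);
 *	// ... later, to unregister:
 *	rte_event_dev_stop_flush_callback_register(dev_id, NULL, NULL);
 * \endcode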
 */
int rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
					       rte_eventdev_stop_flush_t callback, void *userdata);

/**
 * Close an event device. The device cannot be restarted!
 *
 * @param dev_id
 *   Event device identifier
 *
 * @return
 *  - 0 on successfully closing device
 *  - <0 on failure to close device
 *  - (-EAGAIN) if device is busy
 */
int
rte_event_dev_close(uint8_t dev_id);

/**
 * Event vector structure.
 */
struct rte_event_vector {
	uint16_t nb_elem;
	/**< Number of elements valid in this event vector. */
	uint16_t elem_offset : 12;
	/**< Offset into the vector array where valid elements start from. */
	uint16_t rsvd : 3;
	/**< Reserved for future use */
	uint16_t attr_valid : 1;
	/**< Indicates that the below union attributes have valid information.
	 */
	union {
		/* Used by Rx/Tx adapter.
		 * Indicates that all the elements in this vector belong to the
		 * same port and queue pair when originating from the Rx adapter,
		 * valid only when the event type is ETHDEV_VECTOR or
		 * ETH_RX_ADAPTER_VECTOR.
		 * Can also be used to indicate to the Tx adapter the destination
		 * port and queue of the mbufs in the vector.
		 */
		struct {
			uint16_t port;
			/* Ethernet device port id. */
			uint16_t queue;
			/* Ethernet device queue id. */
		};
	};
	/**< Union to hold common attributes of the vector array. */
	uint64_t impl_opaque;
	/**< Implementation specific opaque value.
	 * An implementation may use this field to hold an implementation specific
	 * value to share between the dequeue and enqueue operations.
	 * The application should not modify this field.
	 */

/* empty structures do not have zero size in C++ leading to compilation errors
 * with clang about structure having different sizes in C and C++.
 * Since these are all zero-sized arrays, we can omit the "union" wrapper for
 * C++ builds, removing the warning.
 */
#ifndef __cplusplus
	union {
#endif
		struct rte_mbuf *mbufs[0];
		void *ptrs[0];
		uint64_t u64s[0];
#ifndef __cplusplus
	} __rte_aligned(16);
#endif
	/**< Start of the vector array union. Depending upon the event type the
	 * vector array can be an array of mbufs or pointers or opaque u64
	 * values.
	 */
} __rte_aligned(16);
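
/* A hedged sketch (not part of the original header) of consuming an event
 * vector: it assumes the dequeued event *ev* has RTE_EVENT_TYPE_VECTOR set in
 * its event_type and that the event's payload union member (rte_event::vec)
 * points at the vector.
 *
 * \code{.c}
 *	struct rte_event_vector *vec = ev.vec;
 *	struct rte_mbuf **mbufs = &vec->mbufs[vec->elem_offset];
 *	uint16_t i;
 *
 *	for (i = 0; i < vec->nb_elem; i++)
 *		process_pkt(mbufs[i]);	// process_pkt() is a hypothetical handler
 * \endcode
 */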

/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED          0
/**< Ordered scheduling
 *
 * Events from an ordered flow of an event queue can be scheduled to multiple
 * ports for concurrent processing while maintaining the original event order.
 * This scheme enables the user to achieve high single flow throughput by
 * avoiding SW synchronization for ordering between ports which are bound to cores.
 *
 * The source flow ordering from an event queue is maintained when events are
 * enqueued to their destination queue within the same ordered flow context.
 * An event port holds the context until the application calls
 * rte_event_dequeue_burst() from the same port, which implicitly releases
 * the context.
 * The user may allow the scheduler to release the context earlier than that
 * by invoking rte_event_enqueue_burst() with the RTE_EVENT_OP_RELEASE operation.
 *
 * Events from the source queue appear in their original order when dequeued
 * from a destination queue.
 * Event ordering is based on the received event(s), but also other
 * (newly allocated or stored) events are ordered when enqueued within the same
 * ordered context. Events not enqueued (e.g. released or stored) within the
 * context are considered missing from reordering and are skipped at this time
 * (but can be ordered again within another context).
 *
 * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
 */

#define RTE_SCHED_TYPE_ATOMIC           1
/**< Atomic scheduling
 *
 * Events from an atomic flow of an event queue can be scheduled only to a
 * single port at a time. The port is guaranteed to have exclusive (atomic)
 * access to the associated flow context, which enables the user to avoid SW
 * synchronization. Atomic flows also help to maintain event ordering
 * since only one port at a time can process events from a flow of an
 * event queue.
 *
 * The atomic queue synchronization context is dedicated to the port until
 * the application calls rte_event_dequeue_burst() from the same port,
 * which implicitly releases the context. The user may allow the scheduler to
 * release the context earlier than that by invoking rte_event_enqueue_burst()
 * with the RTE_EVENT_OP_RELEASE operation.
 *
 * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
 */

#define RTE_SCHED_TYPE_PARALLEL         2
/**< Parallel scheduling
 *
 * The scheduler performs priority scheduling, load balancing, and other such
 * functions, but does not provide additional event synchronization or ordering.
 * It is free to schedule events from a single parallel flow of an event queue
 * to multiple event ports for concurrent processing.
 * The application is responsible for flow context synchronization and
 * event ordering (SW synchronization).
 *
 * @see rte_event_queue_setup(), rte_event_dequeue_burst()
 */

/* Event types to classify the event source */
#define RTE_EVENT_TYPE_ETHDEV           0x0
/**< The event generated from ethdev subsystem */
#define RTE_EVENT_TYPE_CRYPTODEV        0x1
/**< The event generated from cryptodev subsystem */
#define RTE_EVENT_TYPE_TIMER		0x2
/**< The event generated from event timer adapter */
#define RTE_EVENT_TYPE_CPU              0x3
/**< The event generated from cpu for pipelining.
 * Application may use *sub_event_type* to further classify the event
 */
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER   0x4
/**< The event generated from event eth Rx adapter */
#define RTE_EVENT_TYPE_DMADEV           0x5
/**< The event generated from dma subsystem */
#define RTE_EVENT_TYPE_VECTOR           0x8
/**< Indicates that the event is a vector.
 * All vector event types should be a logical OR with RTE_EVENT_TYPE_VECTOR.
 * This simplifies the pipeline design as one can split processing
 * between vector events and normal events across event types.
 * Example:
 *	if (ev.event_type & RTE_EVENT_TYPE_VECTOR) {
 *		// Classify and handle vector event.
 *	} else {
 *		// Classify and handle event.
 *	}
 */
#define RTE_EVENT_TYPE_ETHDEV_VECTOR                                           \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
/**< The event vector generated from ethdev subsystem */
#define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
/**< The event vector generated from cpu for pipelining. */
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR                                   \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
/**< The event vector generated from eth Rx adapter. */
#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR                                        \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
/**< The event vector generated from cryptodev adapter. */

#define RTE_EVENT_TYPE_MAX              0x10
/**< Maximum number of event types */

1301 /* Event enqueue operations */
1302 #define RTE_EVENT_OP_NEW                0
1303 /**< The event producers use this operation to inject a new event to the
1304  * event device.
1305  */
1306 #define RTE_EVENT_OP_FORWARD            1
1307 /**< The CPU uses this operation to forward the event to a different event
1308  * queue, or to change it to a new application specific flow or schedule type,
1309  * to enable pipelining.
1310  *
1311  * This operation must only be enqueued to the same port that the
1312  * event to be forwarded was dequeued from.
1313  */
1314 #define RTE_EVENT_OP_RELEASE            2
1315 /**< Release the flow context associated with the schedule type.
1316  *
1317  * If the current flow's schedule type is *RTE_SCHED_TYPE_ATOMIC*,
1318  * then this operation hints to the scheduler that the user has completed
1319  * critical section processing in the current atomic context.
1320  * The scheduler is now allowed to schedule events from the same flow from
1321  * an event queue to another port. However, the context may still be held
1322  * until the next rte_event_dequeue_burst() call; this operation allows, but
1323  * does not force, the scheduler to release the context early.
1324  *
1325  * Early atomic context release may increase parallelism and thus system
1326  * performance, but the user needs to carefully design the split between
1327  * critical and non-critical sections.
1328  *
1329  * If the current flow's schedule type is *RTE_SCHED_TYPE_ORDERED*,
1330  * then this operation hints to the scheduler that the user has done all that
1331  * is needed to maintain event order in the current ordered context.
1332  * The scheduler is allowed to release the ordered context of this port and
1333  * avoid reordering any following enqueues.
1334  *
1335  * Early ordered context release may increase parallelism and thus system
1336  * performance.
1337  *
1338  * If the current flow's schedule type is *RTE_SCHED_TYPE_PARALLEL*,
1339  * or no scheduling context is held, then this operation may be a NOOP,
1340  * depending on the implementation.
1341  *
1342  * This operation must only be enqueued to the same port that the
1343  * event to be released was dequeued from.
1344  */
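
/*
 * Illustrative worker sketch (not part of the API): forward an event to the
 * next pipeline stage, or release its scheduling context when processing ends
 * here. Assumes the event was dequeued from this very port; process(),
 * more_stages and next_queue_id are application-defined placeholders.
 *
 *	struct rte_event ev;
 *
 *	if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0) == 1) {
 *		process(&ev);
 *		if (more_stages) {
 *			ev.queue_id = next_queue_id;
 *			ev.op = RTE_EVENT_OP_FORWARD;
 *		} else {
 *			ev.op = RTE_EVENT_OP_RELEASE;
 *		}
 *		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 *	}
 */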
1345 
1346 /**
1347  * The generic *rte_event* structure to hold the event attributes
1348  * for dequeue and enqueue operations
1349  */
1350 struct rte_event {
1351 	/** WORD0 */
1352 	union {
1353 		uint64_t event;
1354 		/** Event attributes for dequeue or enqueue operation */
1355 		struct {
1356 			uint32_t flow_id:20;
1357 			/**< Targeted flow identifier for the enqueue and
1358 			 * dequeue operation.
1359 			 * The value must be in the range of
1360 			 * [0, nb_event_queue_flows - 1] which was
1361 			 * previously supplied to rte_event_dev_configure().
1362 			 */
1363 			uint32_t sub_event_type:8;
1364 			/**< Sub-event types based on the event source.
1365 			 * @see RTE_EVENT_TYPE_CPU
1366 			 */
1367 			uint32_t event_type:4;
1368 			/**< Event type to classify the event source.
1369 			 * @see RTE_EVENT_TYPE_ETHDEV, (RTE_EVENT_TYPE_*)
1370 			 */
1371 			uint8_t op:2;
1372 			/**< The type of event enqueue operation - new/forward/
1373 			 * etc. This field is not preserved across an instance
1374 			 * and is undefined on dequeue.
1375 			 * @see RTE_EVENT_OP_NEW, (RTE_EVENT_OP_*)
1376 			 */
1377 			uint8_t rsvd:4;
1378 			/**< Reserved for future use */
1379 			uint8_t sched_type:2;
1380 			/**< Scheduler synchronization type (RTE_SCHED_TYPE_*)
1381 			 * associated with flow id on a given event queue
1382 			 * for the enqueue and dequeue operation.
1383 			 */
1384 			uint8_t queue_id;
1385 			/**< Targeted event queue identifier for the enqueue or
1386 			 * dequeue operation.
1387 			 * The value must be in the range of
1388 			 * [0, nb_event_queues - 1] which was previously
1389 			 * supplied to rte_event_dev_configure().
1390 			 */
1391 			uint8_t priority;
1392 			/**< Event priority relative to other events in the
1393 			 * event queue. The requested priority should be in the
1394 			 * range of  [RTE_EVENT_DEV_PRIORITY_HIGHEST,
1395 			 * RTE_EVENT_DEV_PRIORITY_LOWEST].
1396 			 * The implementation shall normalize the requested
1397 			 * priority to a supported priority value.
1398 			 * Valid when the device has
1399 			 * RTE_EVENT_DEV_CAP_EVENT_QOS capability.
1400 			 */
1401 			uint8_t impl_opaque;
1402 			/**< Implementation specific opaque value.
1403 			 * An implementation may use this field to hold
1404 			 * implementation specific value to share between
1405 			 * dequeue and enqueue operation.
1406 			 * The application should not modify this field.
1407 			 */
1408 		};
1409 	};
1410 	/** WORD1 */
1411 	union {
1412 		uint64_t u64;
1413 		/**< Opaque 64-bit value */
1414 		void *event_ptr;
1415 		/**< Opaque event pointer */
1416 		struct rte_mbuf *mbuf;
1417 		/**< mbuf pointer if dequeued event is associated with mbuf */
1418 		struct rte_event_vector *vec;
1419 		/**< Event vector pointer. */
1420 	};
1421 };
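
/*
 * Illustrative sketch (not part of the API): initializing an event for
 * injection with RTE_EVENT_OP_NEW. my_flow_id and my_object are
 * application-defined placeholders.
 *
 *	struct rte_event ev = {
 *		.op = RTE_EVENT_OP_NEW,
 *		.queue_id = 0,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.event_type = RTE_EVENT_TYPE_CPU,
 *		.sub_event_type = 0,
 *		.flow_id = my_flow_id,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		.event_ptr = my_object,
 *	};
 *
 *	rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 */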
1422 
1423 /* Ethdev Rx adapter capability bitmap flags */
1424 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT	0x1
1425 /**< This flag is set when the packet transfer mechanism is in HW.
1426  * Ethdev can send packets to the event device using internal event port.
1427  */
1428 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ	0x2
1429 /**< Adapter supports multiple event queues per ethdev. Every ethdev
1430  * Rx queue can be connected to a unique event queue.
1431  */
1432 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
1433 /**< The application can override the adapter generated flow ID in the
1434  * event. This flow ID can be specified when adding an ethdev Rx queue
1435  * to the adapter using the ev.flow_id member.
1436  * @see struct rte_event_eth_rx_adapter_queue_conf::ev
1437  * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
1438  */
1439 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR	0x8
1440 /**< Adapter supports event vectorization per ethdev. */
1441 
1442 /**
1443  * Retrieve the event device's ethdev Rx adapter capabilities for the
1444  * specified ethernet port
1445  *
1446  * @param dev_id
1447  *   The identifier of the device.
1448  *
1449  * @param eth_port_id
1450  *   The identifier of the ethernet device.
1451  *
1452  * @param[out] caps
1453  *   A pointer to memory filled with Rx event adapter capabilities.
1454  *
1455  * @return
1456  *   - 0: Success, driver provides Rx event adapter capabilities for the
1457  *	ethernet device.
1458  *   - <0: Error code returned by the driver function.
1459  */
1460 int
1461 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1462 				uint32_t *caps);
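
/*
 * Illustrative sketch (not part of the API): probing the Rx adapter
 * capabilities to decide whether packets reach the event device without a
 * service core. Error handling is elided.
 *
 *	uint32_t caps = 0;
 *
 *	rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps);
 *	if (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)
 *		printf("HW injects packets via an internal event port\n");
 */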
1463 
1464 #define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
1465 /**< This flag is set when the timer mechanism is in HW. */
1466 
1467 #define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC      (1ULL << 1)
1468 /**< This flag is set if periodic mode is supported. */
1469 
1470 /**
1471  * Retrieve the event device's timer adapter capabilities.
1472  *
1473  * @param dev_id
1474  *   The identifier of the device.
1475  *
1476  * @param[out] caps
1477  *   A pointer to memory to be filled with event timer adapter capabilities.
1478  *
1479  * @return
1480  *   - 0: Success, driver provided event timer adapter capabilities.
1481  *   - <0: Error code returned by the driver function.
1482  */
1483 int
1484 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
1485 
1486 /* Crypto adapter capability bitmap flag */
1487 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW   0x1
1488 /**< Flag indicates HW is capable of generating events in
1489  * RTE_EVENT_OP_NEW enqueue operation. Cryptodev will send
1490  * packets to the event device as new events using an internal
1491  * event port.
1492  */
1493 
1494 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD   0x2
1495 /**< Flag indicates HW is capable of generating events in
1496  * RTE_EVENT_OP_FORWARD enqueue operation. Cryptodev will send
1497  * packets to the event device as forwarded events using an
1498  * internal event port.
1499  */
1500 
1501 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND  0x4
1502 /**< Flag indicates HW is capable of mapping crypto queue pair to
1503  * event queue.
1504  */
1505 
1506 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA   0x8
1507 /**< Flag indicates HW/SW supports a mechanism to store and retrieve
1508  * the private data information along with the crypto session.
1509  */
1510 
1511 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR   0x10
1512 /**< Flag indicates HW is capable of aggregating processed
1513  * crypto operations into rte_event_vector.
1514  */
1515 
1516 /**
1517  * Retrieve the event device's crypto adapter capabilities for the
1518  * specified cryptodev device
1519  *
1520  * @param dev_id
1521  *   The identifier of the device.
1522  *
1523  * @param cdev_id
1524  *   The identifier of the cryptodev device.
1525  *
1526  * @param[out] caps
1527  *   A pointer to memory filled with event adapter capabilities.
1528  *   It is expected to be pre-allocated & initialized by caller.
1529  *
1530  * @return
1531  *   - 0: Success, driver provides event adapter capabilities for the
1532  *     cryptodev device.
1533  *   - <0: Error code returned by the driver function.
1534  */
1535 int
1536 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
1537 				  uint32_t *caps);
1538 
1539 /* DMA adapter capability bitmap flag */
1540 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
1541 /**< Flag indicates HW is capable of generating events in
1542  * RTE_EVENT_OP_NEW enqueue operation. DMADEV will send
1543  * packets to the event device as new events using an
1544  * internal event port.
1545  */
1546 
1547 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
1548 /**< Flag indicates HW is capable of generating events in
1549  * RTE_EVENT_OP_FORWARD enqueue operation. DMADEV will send
1550  * packets to the event device as forwarded events using an
1551  * internal event port.
1552  */
1553 
1554 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4
1555 /**< Flag indicates HW is capable of mapping DMA vchan to event queue. */
1556 
1557 /**
1558  * Retrieve the event device's DMA adapter capabilities for the
1559  * specified dmadev device
1560  *
1561  * @param dev_id
1562  *   The identifier of the device.
1563  *
1564  * @param dmadev_id
1565  *   The identifier of the dmadev device.
1566  *
1567  * @param[out] caps
1568  *   A pointer to memory filled with event adapter capabilities.
1569  *   It is expected to be pre-allocated & initialized by caller.
1570  *
1571  * @return
1572  *   - 0: Success, driver provides event adapter capabilities for the
1573  *     dmadev device.
1574  *   - <0: Error code returned by the driver function.
1575  *
1576  */
1577 __rte_experimental
1578 int
1579 rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);
1580 
1581 /* Ethdev Tx adapter capability bitmap flags */
1582 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT	0x1
1583 /**< This flag is set when the PMD supports a packet transmit callback
1584  */
1585 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR	0x2
1586 /**< Indicates that the Tx adapter is capable of handling event vector of
1587  * mbufs.
1588  */
1589 
1590 /**
1591  * Retrieve the event device's eth Tx adapter capabilities
1592  *
1593  * @param dev_id
1594  *   The identifier of the device.
1595  *
1596  * @param eth_port_id
1597  *   The identifier of the ethernet device.
1598  *
1599  * @param[out] caps
1600  *   A pointer to memory filled with eth Tx adapter capabilities.
1601  *
1602  * @return
1603  *   - 0: Success, driver provides eth Tx adapter capabilities.
1604  *   - <0: Error code returned by the driver function.
1605  */
1606 int
1607 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1608 				uint32_t *caps);
1609 
1610 /**
1611  * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
1612  *
1613  * If the device is configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT flag
1614  * then the application can use this function to convert a timeout value in
1615  * nanoseconds to an implementation specific timeout value supplied in
1616  * rte_event_dequeue_burst()
1617  *
1618  * @param dev_id
1619  *   The identifier of the device.
1620  * @param ns
1621  *   Wait time in nanoseconds
1622  * @param[out] timeout_ticks
1623  *   Value for the *timeout_ticks* parameter in rte_event_dequeue_burst()
1624  *
1625  * @return
1626  *  - 0 on success.
1627  *  - -ENOTSUP if the device doesn't support timeouts
1628  *  - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
1629  *  - other values < 0 on failure.
1630  *
1631  * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
1632  * @see rte_event_dev_configure()
1633  */
1634 int
1635 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1636 					uint64_t *timeout_ticks);
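
/*
 * Illustrative sketch (not part of the API): converting a 100 us timeout to
 * device ticks before dequeueing. Assumes the device was configured with
 * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT; error handling is elided.
 *
 *	uint64_t ticks = 0;
 *	struct rte_event ev;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) == 0)
 *		(void)rte_event_dequeue_burst(dev_id, port_id, &ev, 1, ticks);
 */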
1637 
1638 /**
1639  * Link multiple source event queues supplied in *queues* to the destination
1640  * event port designated by its *port_id* with associated service priority
1641  * supplied in *priorities* on the event device designated by its *dev_id*.
1642  *
1643  * The link establishment shall enable the event port *port_id* from
1644  * receiving events from the specified event queue(s) supplied in *queues*
1645  *
1646  * An event queue may link to one or more event ports.
1647  * The number of links that can be established from an event queue to an
1648  * event port is implementation defined.
1649  *
1650  * Event queue(s) to event port link establishment can be changed at runtime
1651  * without re-configuring the device to support scaling and to reduce the
1652  * latency of critical work by establishing the link with more event ports
1653  * at runtime.
1654  *
1655  * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
1656  * than or equal to one, this function links the event queues to the default
1657  * profile_id i.e. profile_id 0 of the event port.
1658  *
1659  * @param dev_id
1660  *   The identifier of the device.
1661  *
1662  * @param port_id
1663  *   Event port identifier to select the destination port to link.
1664  *
1665  * @param queues
1666  *   Points to an array of *nb_links* event queues to be linked
1667  *   to the event port.
1668  *   NULL value is allowed, in which case this function links all the configured
1669  *   event queues *nb_event_queues* which were previously supplied to
1670  *   rte_event_dev_configure() to the event port *port_id*
1671  *
1672  * @param priorities
1673  *   Points to an array of *nb_links* service priorities associated with each
1674  *   event queue link to event port.
1675  *   The priority defines the event port's servicing priority for
1676  *   event queue, which may be ignored by an implementation.
1677  *   The requested priority should be in the range of
1678  *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
1679  *   The implementation shall normalize the requested priority to
1680  *   implementation supported priority value.
1681  *   NULL value is allowed, in which case this function links the event queues
1682  *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
1683  *
1684  * @param nb_links
1685  *   The number of links to establish. This parameter is ignored if queues is
1686  *   NULL.
1687  *
1688  * @return
1689  * The number of links actually established. The return value can be less than
1690  * the value of the *nb_links* parameter when the implementation has a
1691  * limitation on specific queue to port link establishment or if invalid
1692  * parameters are specified in *queues*.
1693  * If the return value is less than *nb_links*, the remaining links at the end
1694  * of queues[] are not established, and the caller has to take care of them.
1695  * If the return value is less than *nb_links* then the implementation shall
1696  * update rte_errno accordingly. Possible rte_errno values are:
1697  * (EDQUOT) Quota exceeded (the application tried to link a queue configured
1698  *  with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
1699  * (EINVAL) Invalid parameter
1700  */
1701 int
1702 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
1703 		    const uint8_t queues[], const uint8_t priorities[],
1704 		    uint16_t nb_links);
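
/*
 * Illustrative sketch (not part of the API): linking two queues to a port
 * with distinct servicing priorities. Queue and port identifiers are
 * placeholders.
 *
 *	const uint8_t queues[] = { 0, 1 };
 *	const uint8_t prios[] = { RTE_EVENT_DEV_PRIORITY_HIGHEST,
 *				  RTE_EVENT_DEV_PRIORITY_NORMAL };
 *
 *	if (rte_event_port_link(dev_id, port_id, queues, prios, 2) != 2)
 *		printf("partial link, rte_errno %d\n", rte_errno);
 */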
1705 
1706 /**
1707  * Unlink multiple source event queues supplied in *queues* from the destination
1708  * event port designated by its *port_id* on the event device designated
1709  * by its *dev_id*.
1710  *
1711  * The unlink call issues an async request to disable the event port *port_id*
1712  * from receiving events from the specified event queue(s).
1713  * Event queue(s) to event port unlinks can be performed at runtime
1714  * without re-configuring the device.
1715  *
1716  * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
1717  * than or equal to one, this function unlinks the event queues from the default
1718  * profile identifier i.e. profile 0 of the event port.
1719  *
1720  * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
1721  *
1722  * @param dev_id
1723  *   The identifier of the device.
1724  *
1725  * @param port_id
1726  *   Event port identifier to select the destination port to unlink.
1727  *
1728  * @param queues
1729  *   Points to an array of *nb_unlinks* event queues to be unlinked
1730  *   from the event port.
1731  *   NULL value is allowed, in which case this function unlinks all the
1732  *   event queue(s) from the event port *port_id*.
1733  *
1734  * @param nb_unlinks
1735  *   The number of unlinks to perform. This parameter is ignored if queues is
1736  *   NULL.
1737  *
1738  * @return
1739  * The number of unlinks successfully requested. The return value can be less
1740  * than the value of the *nb_unlinks* parameter when the implementation has a
1741  * limitation on specific queue to port unlink establishment or
1742  * if invalid parameters are specified.
1743  * If the return value is less than *nb_unlinks*, the remaining queues at the
1744  * end of queues[] are not unlinked, and the caller has to take care of them.
1745  * If the return value is less than *nb_unlinks* then the implementation shall
1746  * update rte_errno accordingly. Possible rte_errno values are:
1747  * (EINVAL) Invalid parameter
1748  */
1749 int
1750 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
1751 		      uint8_t queues[], uint16_t nb_unlinks);
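
/*
 * Illustrative sketch (not part of the API): unlinking a queue and waiting
 * for the async unlink to complete before tearing the queue down.
 *
 *	uint8_t queue = 1;
 *
 *	rte_event_port_unlink(dev_id, port_id, &queue, 1);
 *	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *		rte_pause();
 */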
1752 
1753 /**
1754  * Link multiple source event queues supplied in *queues* to the destination
1755  * event port designated by its *port_id* with associated profile identifier
1756  * supplied in *profile_id* with service priorities supplied in *priorities*
1757  * on the event device designated by its *dev_id*.
1758  *
1759  * If *profile_id* is set to 0, then the links created by the call
1760  * ``rte_event_port_link()`` will be overwritten.
1761  *
1762  * Event ports by default use profile_id 0 unless it is changed using the
1763  * call ``rte_event_port_profile_switch()``.
1764  *
1765  * The link establishment shall enable the event port *port_id* from
1766  * receiving events from the specified event queue(s) supplied in *queues*
1767  *
1768  * An event queue may link to one or more event ports.
1769  * The number of links that can be established from an event queue to an
1770  * event port is implementation defined.
1771  *
1772  * Event queue(s) to event port link establishment can be changed at runtime
1773  * without re-configuring the device to support scaling and to reduce the
1774  * latency of critical work by establishing the link with more event ports
1775  * at runtime.
1776  *
1777  * @param dev_id
1778  *   The identifier of the device.
1779  *
1780  * @param port_id
1781  *   Event port identifier to select the destination port to link.
1782  *
1783  * @param queues
1784  *   Points to an array of *nb_links* event queues to be linked
1785  *   to the event port.
1786  *   NULL value is allowed, in which case this function links all the configured
1787  *   event queues *nb_event_queues* which were previously supplied to
1788  *   rte_event_dev_configure() to the event port *port_id*
1789  *
1790  * @param priorities
1791  *   Points to an array of *nb_links* service priorities associated with each
1792  *   event queue link to event port.
1793  *   The priority defines the event port's servicing priority for
1794  *   event queue, which may be ignored by an implementation.
1795  *   The requested priority should be in the range of
1796  *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
1797  *   The implementation shall normalize the requested priority to
1798  *   implementation supported priority value.
1799  *   NULL value is allowed, in which case this function links the event queues
1800  *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
1801  *
1802  * @param nb_links
1803  *   The number of links to establish. This parameter is ignored if queues is
1804  *   NULL.
1805  *
1806  * @param profile_id
1807  *   The profile identifier associated with the links between event queues and
1808  *   event port. Should be less than the max capability reported by
1809  *   ``rte_event_dev_info::max_profiles_per_port``
1810  *
1811  * @return
1812  * The number of links actually established. The return value can be less than
1813  * the value of the *nb_links* parameter when the implementation has a
1814  * limitation on specific queue to port link establishment or if invalid
1815  * parameters are specified in *queues*.
1816  * If the return value is less than *nb_links*, the remaining links at the end
1817  * of queues[] are not established, and the caller has to take care of them.
1818  * If the return value is less than *nb_links* then the implementation shall
1819  * update rte_errno accordingly. Possible rte_errno values are:
1820  * (EDQUOT) Quota exceeded (the application tried to link a queue configured
1821  *  with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
1822  * (EINVAL) Invalid parameter
1823  *
1824  */
1825 __rte_experimental
1826 int
1827 rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
1828 				 const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);
1829 
1830 /**
1831  * Unlink multiple source event queues supplied in *queues* that belong to profile
1832  * designated by *profile_id* from the destination event port designated by its
1833  * *port_id* on the event device designated by its *dev_id*.
1834  *
1835  * If *profile_id* is set to 0, i.e. the default profile, then this function
1836  * will act as ``rte_event_port_unlink()``.
1837  *
1838  * The unlink call issues an async request to disable the event port *port_id*
1839  * from receiving events from the specified event queue(s).
1840  * Event queue(s) to event port unlinks can be performed at runtime
1841  * without re-configuring the device.
1842  *
1843  * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
1844  *
1845  * @param dev_id
1846  *   The identifier of the device.
1847  *
1848  * @param port_id
1849  *   Event port identifier to select the destination port to unlink.
1850  *
1851  * @param queues
1852  *   Points to an array of *nb_unlinks* event queues to be unlinked
1853  *   from the event port.
1854  *   NULL value is allowed, in which case this function unlinks all the
1855  *   event queue(s) from the event port *port_id*.
1856  *
1857  * @param nb_unlinks
1858  *   The number of unlinks to perform. This parameter is ignored if queues is
1859  *   NULL.
1860  *
1861  * @param profile_id
1862  *   The profile identifier associated with the links between event queues and
1863  *   event port. Should be less than the max capability reported by
1864  *   ``rte_event_dev_info::max_profiles_per_port``
1865  *
1866  * @return
1867  * The number of unlinks successfully requested. The return value can be less
1868  * than the value of the *nb_unlinks* parameter when the implementation has a
1869  * limitation on specific queue to port unlink establishment or
1870  * if invalid parameters are specified.
1871  * If the return value is less than *nb_unlinks*, the remaining queues at the
1872  * end of queues[] are not unlinked, and the caller has to take care of them.
1873  * If the return value is less than *nb_unlinks* then the implementation shall
1874  * update rte_errno accordingly. Possible rte_errno values are:
1875  * (EINVAL) Invalid parameter
1876  *
1877  */
1878 __rte_experimental
1879 int
1880 rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
1881 			      uint16_t nb_unlinks, uint8_t profile_id);
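
/*
 * Illustrative sketch (not part of the API): populating two link profiles on
 * one port and switching between them at runtime. The queue identifiers are
 * placeholders; default servicing priorities are used.
 *
 *	const uint8_t rx_queue = 0;
 *	const uint8_t tx_queue = 1;
 *
 *	rte_event_port_profile_links_set(dev_id, port_id, &rx_queue, NULL, 1, 0);
 *	rte_event_port_profile_links_set(dev_id, port_id, &tx_queue, NULL, 1, 1);
 *
 *	rte_event_port_profile_switch(dev_id, port_id, 1);
 */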
1882 
1883 /**
1884  * Returns the number of unlinks in progress.
1885  *
1886  * This function provides the application with a method to detect when an
1887  * unlink has been completed by the implementation.
1888  *
1889  * @see rte_event_port_unlink() to issue unlink requests.
1890  *
1891  * @param dev_id
1892  *   The identifier of the device.
1893  *
1894  * @param port_id
1895  *   Event port identifier to select port to check for unlinks in progress.
1896  *
1897  * @return
1898  * The number of unlinks that are in progress. A return of zero indicates that
1899  * there are no outstanding unlink requests. A positive return value indicates
1900  * the number of unlinks that are in progress, but are not yet complete.
1901  * A negative return value indicates an error, -EINVAL indicates an invalid
1902  * parameter passed for *dev_id* or *port_id*.
1903  */
1904 int
1905 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);
1906 
1907 /**
1908  * Retrieve the list of source event queues and their associated service
1909  * priorities linked to the destination event port designated by its *port_id*
1910  * on the event device designated by its *dev_id*.
1911  *
1912  * @param dev_id
1913  *   The identifier of the device.
1914  *
1915  * @param port_id
1916  *   Event port identifier.
1917  *
1918  * @param[out] queues
1919  *   Points to an array of *queues* for output.
1920  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1921  *   store the event queue(s) linked with event port *port_id*
1922  *
1923  * @param[out] priorities
1924  *   Points to an array of *priorities* for output.
1925  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1926  *   store the service priority associated with each event queue linked
1927  *
1928  * @return
1929  * The number of links established on the event port designated by its
1930  *  *port_id*.
1931  * - <0 on failure.
1932  */
1933 int
1934 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1935 			 uint8_t queues[], uint8_t priorities[]);
1936 
1937 /**
1938  * Retrieve the list of source event queues and their service priorities
1939  * associated with a *profile_id* and linked to the destination event port
1940  * designated by its *port_id* on the event device designated by its *dev_id*.
1941  *
1942  * @param dev_id
1943  *   The identifier of the device.
1944  *
1945  * @param port_id
1946  *   Event port identifier.
1947  *
1948  * @param[out] queues
1949  *   Points to an array of *queues* for output.
1950  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1951  *   store the event queue(s) linked with event port *port_id*
1952  *
1953  * @param[out] priorities
1954  *   Points to an array of *priorities* for output.
1955  *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1956  *   store the service priority associated with each event queue linked
1957  *
1958  * @param profile_id
1959  *   The profile identifier associated with the links between event queues and
1960  *   event port. Should be less than the max capability reported by
1961  *   ``rte_event_dev_info::max_profiles_per_port``
1962  *
1963  * @return
1964  * The number of links established on the event port designated by its
1965  *  *port_id*.
1966  * - <0 on failure.
1967  */
1968 __rte_experimental
1969 int
1970 rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
1971 				 uint8_t priorities[], uint8_t profile_id);
1972 
1973 /**
1974  * Retrieve the service ID of the event dev. If the event dev doesn't use
1975  * a rte_service function, this function returns -ESRCH.
1976  *
1977  * @param dev_id
1978  *   The identifier of the device.
1979  *
1980  * @param [out] service_id
1981  *   A pointer to a uint32_t, to be filled in with the service id.
1982  *
1983  * @return
1984  *   - 0: Success
1985  *   - <0: Error code on failure, if the event dev doesn't use a rte_service
1986  *   function, this function returns -ESRCH.
1987  */
1988 int
1989 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
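
/*
 * Illustrative sketch (not part of the API): mapping a software scheduler's
 * service to a service lcore. Assumes lcore 2 (a placeholder) was already
 * added and started as a service core; see rte_service.h.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, 2, 1);
 *		rte_service_runstate_set(service_id, 1);
 *	}
 */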
1990 
1991 /**
1992  * Dump internal information about *dev_id* to the FILE* provided in *f*.
1993  *
1994  * @param dev_id
1995  *   The identifier of the device.
1996  *
1997  * @param f
1998  *   A pointer to a file for output
1999  *
2000  * @return
2001  *   - 0: on success
2002  *   - <0: on failure.
2003  */
2004 int
2005 rte_event_dev_dump(uint8_t dev_id, FILE *f);
2006 
2007 /** Maximum name length for extended statistics counters */
2008 #define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
2009 
2010 /**
2011  * Selects the component of the eventdev to retrieve statistics from.
2012  */
2013 enum rte_event_dev_xstats_mode {
2014 	RTE_EVENT_DEV_XSTATS_DEVICE,
2015 	RTE_EVENT_DEV_XSTATS_PORT,
2016 	RTE_EVENT_DEV_XSTATS_QUEUE,
2017 };
2018 
2019 /**
2020  * A name-key lookup element for extended statistics.
2021  *
2022  * This structure is used to map between names and ID numbers
2023  * for extended eventdev statistics.
2024  */
2025 struct rte_event_dev_xstats_name {
2026 	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
2027 };
2028 
2029 /**
2030  * Retrieve names of extended statistics of an event device.
2031  *
2032  * @param dev_id
2033  *   The identifier of the event device.
2034  * @param mode
2035  *   The mode of statistics to retrieve. Choices include the device statistics,
2036  *   port statistics or queue statistics.
2037  * @param queue_port_id
2038  *   Used to specify the port or queue number in queue or port mode, and is
2039  *   ignored in device mode.
2040  * @param[out] xstats_names
2041  *   Block of memory to insert names into. Must be at least *size* in capacity.
2042  *   If set to NULL, the function returns the required capacity.
2043  * @param[out] ids
2044  *   Block of memory to insert ids into. Must be at least *size* in capacity.
2045  *   If set to NULL, the function returns the required capacity. The id values
2046  *   returned can be passed to *rte_event_dev_xstats_get* to select statistics.
2047  * @param size
2048  *   Capacity of xstats_names (number of names).
2049  * @return
2050  *   - positive value lower or equal to size: success. The return value
2051  *     is the number of entries filled in the stats table.
2052  *   - positive value higher than size: error, the given statistics table
2053  *     is too small. The return value corresponds to the size that should
2054  *     be given to succeed. The entries in the table are not valid and
2055  *     shall not be used by the caller.
2056  *   - negative value on error:
2057  *        -ENODEV for invalid *dev_id*
2058  *        -EINVAL for invalid mode, queue port or id parameters
2059  *        -ENOTSUP if the device doesn't support this function.
2060  */
2061 int
2062 rte_event_dev_xstats_names_get(uint8_t dev_id,
2063 			       enum rte_event_dev_xstats_mode mode,
2064 			       uint8_t queue_port_id,
2065 			       struct rte_event_dev_xstats_name *xstats_names,
2066 			       uint64_t *ids,
2067 			       unsigned int size);
2068 
2069 /**
2070  * Retrieve extended statistics of an event device.
2071  *
2072  * @param dev_id
2073  *   The identifier of the device.
2074  * @param mode
2075  *  The mode of statistics to retrieve. Choices include the device statistics,
2076  *  port statistics or queue statistics.
2077  * @param queue_port_id
2078  *   Used to specify the port or queue number in queue or port mode, and is
2079  *   ignored in device mode.
2080  * @param ids
2081  *   The id numbers of the stats to get. The ids can be obtained from the stat
2082  *   position in the stat list from rte_event_dev_xstats_names_get(), or
2083  *   by using rte_event_dev_xstats_by_name_get().
2084  * @param[out] values
2085  *   The values for each stats request by ID.
2086  * @param n
2087  *   The number of stats requested
2088  * @return
2089  *   - positive value: number of stat entries filled into the values array
2090  *   - negative value on error:
2091  *        -ENODEV for invalid *dev_id*
2092  *        -EINVAL for invalid mode, queue port or id parameters
2093  *        -ENOTSUP if the device doesn't support this function.
2094  */
2095 int
2096 rte_event_dev_xstats_get(uint8_t dev_id,
2097 			 enum rte_event_dev_xstats_mode mode,
2098 			 uint8_t queue_port_id,
2099 			 const uint64_t ids[],
2100 			 uint64_t values[], unsigned int n);
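
/*
 * Illustrative sketch (not part of the API): the usual two-call pattern of
 * sizing, fetching names/ids and then reading the values of all device-level
 * xstats. Error handling and the free() calls are elided.
 *
 *	int num = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *	struct rte_event_dev_xstats_name *names =
 *			malloc(num * sizeof(*names));
 *	uint64_t *ids = malloc(num * sizeof(*ids));
 *	uint64_t *values = malloc(num * sizeof(*values));
 *
 *	rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				       names, ids, num);
 *	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				 ids, values, num);
 *	for (int i = 0; i < num; i++)
 *		printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
 */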
2101 
2102 /**
2103  * Retrieve the value of a single stat by requesting it by name.
2104  *
2105  * @param dev_id
2106  *   The identifier of the device
2107  * @param name
2108  *   The stat name to retrieve
2109  * @param[out] id
2110  *   If non-NULL, the numerical id of the stat will be returned, so that further
2111  *   requests for the stat can be made using rte_event_dev_xstats_get(), which
2112  *   will be faster as it doesn't need to scan a list of names for the stat.
2113  *   If the stat cannot be found, the id returned will be (unsigned)-1.
2114  * @return
2115  *   - positive value or zero: the stat value
2116  *   - negative value: -EINVAL if stat not found, -ENOTSUP if not supported.
2117  */
2118 uint64_t
2119 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
2120 				 uint64_t *id);
2121 
2122 /**
2123  * Reset the values of the xstats of the selected component in the device.
2124  *
2125  * @param dev_id
2126  *   The identifier of the device
2127  * @param mode
2128  *   The mode of the statistics to reset. Choose from device, queue or port.
2129  * @param queue_port_id
2130  *   The queue or port to reset. 0 and positive values select ports and queues,
2131  *   while -1 indicates all ports or queues.
2132  * @param ids
2133  *   Selects specific statistics to be reset. When NULL, all statistics selected
2134  *   by *mode* will be reset. If non-NULL, must point to array of at least
2135  *   *nb_ids* size.
2136  * @param nb_ids
2137  *   The number of ids available from the *ids* array. Ignored when ids is NULL.
2138  * @return
2139  *   - zero: successfully reset the statistics to zero
2140  *   - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported.
2141  */
2142 int
2143 rte_event_dev_xstats_reset(uint8_t dev_id,
2144 			   enum rte_event_dev_xstats_mode mode,
2145 			   int16_t queue_port_id,
2146 			   const uint64_t ids[],
2147 			   uint32_t nb_ids);
2148 
2149 /**
2150  * Trigger the eventdev self test.
2151  *
2152  * @param dev_id
2153  *   The identifier of the device
2154  * @return
2155  *   - 0: Selftest successful
2156  *   - -ENOTSUP if the device doesn't support selftest
2157  *   - other values < 0 on failure.
2158  */
2159 int rte_event_dev_selftest(uint8_t dev_id);
2160 
2161 /**
2162  * Get the memory required per event vector based on the number of elements per
2163  * vector.
2164  * This should be used to create the mempool that holds the event vectors.
2165  *
2166  * @param name
2167  *   The name of the vector pool.
2168  * @param n
2169  *   The number of elements in the event vector pool.
2170  * @param cache_size
2171  *   Size of the per-core object cache. See rte_mempool_create() for
2172  *   details.
2173  * @param nb_elem
2174  *   The number of elements that a single event vector should be able to hold.
2175  * @param socket_id
2176  *   The socket identifier where the memory should be allocated. The
2177  *   value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
2178  *   reserved zone
2179  *
2180  * @return
2181  *   The pointer to the newly allocated mempool, on success. NULL on error
2182  *   with rte_errno set appropriately. Possible rte_errno values include:
2183  *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
2184  *    - E_RTE_SECONDARY - function was called from a secondary process instance
2185  *    - EINVAL - cache size provided is too large, or priv_size is not aligned.
2186  *    - ENOSPC - the maximum number of memzones has already been allocated
2187  *    - EEXIST - a memzone with the same name already exists
2188  *    - ENOMEM - no appropriate memory area found in which to create memzone
2189  *    - ENAMETOOLONG - mempool name requested is too long.
2190  */
2191 struct rte_mempool *
2192 rte_event_vector_pool_create(const char *name, unsigned int n,
2193 			     unsigned int cache_size, uint16_t nb_elem,
2194 			     int socket_id);
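
/*
 * Illustrative sketch (not part of the API): creating a pool of 16k event
 * vectors, each able to carry up to 64 elements. The name and sizes are
 * placeholders.
 *
 *	struct rte_mempool *vec_pool;
 *
 *	vec_pool = rte_event_vector_pool_create("evt_vec_pool", 16384, 128,
 *						64, SOCKET_ID_ANY);
 *	if (vec_pool == NULL)
 *		rte_panic("vector pool creation failed: %d\n", rte_errno);
 */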
2195 
2196 #include <rte_eventdev_core.h>
2197 
2198 static __rte_always_inline uint16_t
2199 __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2200 			  const struct rte_event ev[], uint16_t nb_events,
2201 			  const event_enqueue_burst_t fn)
2202 {
2203 	const struct rte_event_fp_ops *fp_ops;
2204 	void *port;
2205 
2206 	fp_ops = &rte_event_fp_ops[dev_id];
2207 	port = fp_ops->data[port_id];
2208 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2209 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2210 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2211 		rte_errno = EINVAL;
2212 		return 0;
2213 	}
2214 
2215 	if (port == NULL) {
2216 		rte_errno = EINVAL;
2217 		return 0;
2218 	}
2219 #endif
2220 	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
2221 	/*
2222 	 * Allow zero-cost invocation of the non-burst mode routine if the
2223 	 * application requests a constant nb_events of one.
2224 	 */
2225 	if (nb_events == 1)
2226 		return (fp_ops->enqueue)(port, ev);
2227 	else
2228 		return fn(port, ev, nb_events);
2229 }
2230 
2231 /**
2232  * Enqueue a burst of event objects or an event object supplied in *rte_event*
2233  * structure on an  event device designated by its *dev_id* through the event
2234  * port specified by *port_id*. Each event object specifies the event queue on
2235  * which it will be enqueued.
2236  *
2237  * The *nb_events* parameter is the number of event objects to enqueue which are
2238  * supplied in the *ev* array of *rte_event* structure.
2239  *
2240  * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
2241  * enqueued to the same port that their associated events were dequeued from.
2242  *
2243  * The rte_event_enqueue_burst() function returns the number of
2244  * event objects it actually enqueued. A return value equal to *nb_events*
2245  * means that all event objects have been enqueued.
2246  *
2247  * @param dev_id
2248  *   The identifier of the device.
2249  * @param port_id
2250  *   The identifier of the event port.
2251  * @param ev
2252  *   Points to an array of *nb_events* objects of type *rte_event* structure
2253  *   which contain the event object enqueue operations to be processed.
2254  * @param nb_events
2255  *   The number of event objects to enqueue, typically number of
2256  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2257  *   available for this port.
2258  *
2259  * @return
2260  *   The number of event objects actually enqueued on the event device. The
2261  *   return value can be less than the value of the *nb_events* parameter when
2262  *   the event device's queue is full or if invalid parameters are specified in a
2263  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2264  *   events at the end of ev[] are not consumed and the caller has to take care
2265  *   of them, and rte_errno is set accordingly. Possible errno values include:
2266  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2267  *              ID is invalid, or an event's sched type doesn't match the
2268  *              capabilities of the destination queue.
2269  *   - ENOSPC   The event port was backpressured and unable to enqueue
2270  *              one or more events. This error code is only applicable to
2271  *              closed systems.
2272  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2273  */
2274 static inline uint16_t
2275 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2276 			const struct rte_event ev[], uint16_t nb_events)
2277 {
2278 	const struct rte_event_fp_ops *fp_ops;
2279 
2280 	fp_ops = &rte_event_fp_ops[dev_id];
2281 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2282 					 fp_ops->enqueue_burst);
2283 }
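
/*
 * Illustrative sketch (not part of the API): retrying a partially accepted
 * burst until all events are enqueued, giving up on a hard error. *evs* and
 * *nb* are application-defined placeholders.
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < nb) {
 *		uint16_t n = rte_event_enqueue_burst(dev_id, port_id,
 *						     &evs[sent], nb - sent);
 *		sent += n;
 *		if (n == 0 && rte_errno != ENOSPC)
 *			break;
 *	}
 */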
2284 
2285 /**
2286  * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_NEW* on
2287  * an event device designated by its *dev_id* through the event port specified
2288  * by *port_id*.
2289  *
2290  * Provides the same functionality as rte_event_enqueue_burst(), except that
2291  * the application can use this API when all objects in the burst contain
2292  * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
2293  * function can provide an additional hint to the PMD and optimize if possible.
2294  *
2295  * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
2296  * has an event object of operation type != RTE_EVENT_OP_NEW.
2297  *
2298  * @param dev_id
2299  *   The identifier of the device.
2300  * @param port_id
2301  *   The identifier of the event port.
2302  * @param ev
2303  *   Points to an array of *nb_events* objects of type *rte_event* structure
2304  *   which contain the event object enqueue operations to be processed.
2305  * @param nb_events
2306  *   The number of event objects to enqueue, typically number of
2307  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2308  *   available for this port.
2309  *
2310  * @return
2311  *   The number of event objects actually enqueued on the event device. The
2312  *   return value can be less than the value of the *nb_events* parameter when
2313  *   the event device's queue is full or if invalid parameters are specified in a
2314  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2315  *   events at the end of ev[] are not consumed and the caller has to take care
2316  *   of them, and rte_errno is set accordingly. Possible errno values include:
2317  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2318  *              ID is invalid, or an event's sched type doesn't match the
2319  *              capabilities of the destination queue.
2320  *   - ENOSPC   The event port was backpressured and unable to enqueue
2321  *              one or more events. This error code is only applicable to
2322  *              closed systems.
2323  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2324  * @see rte_event_enqueue_burst()
2325  */
2326 static inline uint16_t
2327 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
2328 			    const struct rte_event ev[], uint16_t nb_events)
2329 {
2330 	const struct rte_event_fp_ops *fp_ops;
2331 
2332 	fp_ops = &rte_event_fp_ops[dev_id];
2333 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2334 					 fp_ops->enqueue_new_burst);
2335 }
2336 
2337 /**
2338  * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_FORWARD*
2339  * on an event device designated by its *dev_id* through the event port
2340  * specified by *port_id*.
2341  *
2342  * Provides the same functionality as rte_event_enqueue_burst(), except that
2343  * the application can use this API when all objects in the burst contain
2344  * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
2345  * function can provide an additional hint to the PMD and optimize if possible.
2346  *
2347  * The rte_event_enqueue_forward_burst() result is undefined if the enqueue
2348  * burst has an event object of operation type != RTE_EVENT_OP_FORWARD.
2349  *
2350  * @param dev_id
2351  *   The identifier of the device.
2352  * @param port_id
2353  *   The identifier of the event port.
2354  * @param ev
2355  *   Points to an array of *nb_events* objects of type *rte_event* structure
2356  *   which contain the event object enqueue operations to be processed.
2357  * @param nb_events
2358  *   The number of event objects to enqueue, typically number of
2359  *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
2360  *   available for this port.
2361  *
2362  * @return
2363  *   The number of event objects actually enqueued on the event device. The
2364  *   return value can be less than the value of the *nb_events* parameter when
2365  *   the event device's queue is full or if invalid parameters are specified in a
2366  *   *rte_event*. If the return value is less than *nb_events*, the remaining
2367  *   events at the end of ev[] are not consumed and the caller has to take care
2368  *   of them, and rte_errno is set accordingly. Possible errno values include:
2369  *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
2370  *              ID is invalid, or an event's sched type doesn't match the
2371  *              capabilities of the destination queue.
2372  *   - ENOSPC   The event port was backpressured and unable to enqueue
2373  *              one or more events. This error code is only applicable to
2374  *              closed systems.
2375  * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
2376  * @see rte_event_enqueue_burst()
2377  */
2378 static inline uint16_t
2379 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
2380 				const struct rte_event ev[], uint16_t nb_events)
2381 {
2382 	const struct rte_event_fp_ops *fp_ops;
2383 
2384 	fp_ops = &rte_event_fp_ops[dev_id];
2385 	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2386 					 fp_ops->enqueue_forward_burst);
2387 }
2388 
2389 /**
2390  * Dequeue a burst of event objects or an event object from the event port
2391  * designated by its *event_port_id*, on an event device designated
2392  * by its *dev_id*.
2393  *
2394  * rte_event_dequeue_burst() does not dictate the specifics of scheduling
2395  * algorithm as each eventdev driver may have different criteria to schedule
2396  * an event. However, in general, from an application perspective the scheduler
2397  * may use the following scheme to dispatch an event to the port.
2398  *
2399  * 1) Selection of event queue based on
2400  *   a) The list of event queues linked to the event port.
2401  *   b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
2402  *   queue selection from the list is based on event queue priority relative to
2403  *   other event queues, supplied as *priority* in rte_event_queue_setup()
2404  *   c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
2405  *   queue selection from the list is based on event priority supplied as
2406  *   *priority* in rte_event_enqueue_burst()
2407  * 2) Selection of event
2408  *   a) The number of flows available in the selected event queue.
2409  *   b) The schedule type associated with the event
2410  *
2411  * The *nb_events* parameter is the maximum number of event objects to dequeue
2412  * which are returned in the *ev* array of *rte_event* structure.
2413  *
2414  * The rte_event_dequeue_burst() function returns the number of event objects
2415  * it actually dequeued. A return value equal to *nb_events* means that all
2416  * event objects have been dequeued.
2417  *
2418  * The number of events dequeued is the number of scheduler contexts held by
2419  * this port. These contexts are automatically released in the next
2420  * rte_event_dequeue_burst() invocation if the port supports implicit
2421  * releases; otherwise, invoking rte_event_enqueue_burst() with the
2422  * RTE_EVENT_OP_RELEASE operation can be used to release the contexts early.
2423  *
2424  * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
2425  * enqueued to the same port that their associated events were dequeued from.
2426  *
2427  * @param dev_id
2428  *   The identifier of the device.
2429  * @param port_id
2430  *   The identifier of the event port.
2431  * @param[out] ev
2432  *   Points to an array of *nb_events* objects of type *rte_event* structure
2433  *   for output to be populated with the dequeued event objects.
2434  * @param nb_events
2435  *   The maximum number of event objects to dequeue, typically number of
2436  *   rte_event_port_dequeue_depth() available for this port.
2437  *
2438  * @param timeout_ticks
2439  *   - 0 no-wait, returns immediately if there is no event.
2440  *   - >0 wait for the event. If the device is configured with
2441  *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
2442  *   at least one event is available or *timeout_ticks* time has elapsed.
2443  *   If the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
2444  *   then this function will wait until an event is available or the
2445  *   *dequeue_timeout_ns* ns, which was previously supplied to
2446  *   rte_event_dev_configure(), has elapsed.
2447  *
2448  * @return
2449  * The number of event objects actually dequeued from the port. The return
2450  * value can be less than the value of the *nb_events* parameter when fewer
2451  * than *nb_events* events are available in the event port's queue.
2452  *
2453  * @see rte_event_port_dequeue_depth()
2454  */
2455 static inline uint16_t
2456 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
2457 			uint16_t nb_events, uint64_t timeout_ticks)
2458 {
2459 	const struct rte_event_fp_ops *fp_ops;
2460 	void *port;
2461 
2462 	fp_ops = &rte_event_fp_ops[dev_id];
2463 	port = fp_ops->data[port_id];
2464 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2465 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2466 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2467 		rte_errno = EINVAL;
2468 		return 0;
2469 	}
2470 
2471 	if (port == NULL) {
2472 		rte_errno = EINVAL;
2473 		return 0;
2474 	}
2475 #endif
2476 	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
2477 	/*
2478 	 * Allow zero-cost invocation of the non-burst mode routine if the
2479 	 * application requests a constant nb_events of one.
2480 	 */
2481 	if (nb_events == 1)
2482 		return (fp_ops->dequeue)(port, ev, timeout_ticks);
2483 	else
2484 		return (fp_ops->dequeue_burst)(port, ev, nb_events,
2485 					       timeout_ticks);
2486 }
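
/*
 * Illustrative sketch (not part of the API): a simple dequeue loop. The
 * process() helper, the run flag and timeout_ticks are application-defined
 * placeholders.
 *
 *	struct rte_event evs[32];
 *
 *	while (run) {
 *		uint16_t n = rte_event_dequeue_burst(dev_id, port_id, evs,
 *						     RTE_DIM(evs), timeout_ticks);
 *		for (uint16_t i = 0; i < n; i++)
 *			process(&evs[i]);
 *	}
 */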
2487 
2488 #define RTE_EVENT_DEV_MAINT_OP_FLUSH          (1 << 0)
2489 /**< Force an immediate flush of any buffered events in the port,
2490  * potentially at the cost of additional overhead.
2491  *
2492  * @see rte_event_maintain()
2493  */
2494 
2495 /**
2496  * Maintain an event device.
2497  *
2498  * This function is only relevant for event devices which do not have
2499  * the @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE flag set. Such devices
2500  * require an application thread using a particular port to
2501  * periodically call rte_event_maintain() on that port during periods in
2502  * which it is neither attempting to enqueue events to nor dequeue
2503  * events from the port. rte_event_maintain() is a low-overhead
2504  * function and should be called at a high rate (e.g., in the
2505  * application's poll loop).
2506  *
2507  * No port may be left unmaintained.
2508  *
2509  * At the application thread's convenience, rte_event_maintain() may
2510  * (but is not required to) be called even during periods when enqueue
2511  * or dequeue functions are being called, at the cost of a slight
2512  * increase in overhead.
2513  *
2514  * rte_event_maintain() may be called on event devices which have set
2515  * @ref RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, in which case it is a
2516  * no-operation.
2517  *
2518  * @param dev_id
2519  *   The identifier of the device.
2520  * @param port_id
2521  *   The identifier of the event port.
2522  * @param op
2523  *   0, or @ref RTE_EVENT_DEV_MAINT_OP_FLUSH.
2524  * @return
2525  *  - 0 on success.
2526  *  - -EINVAL if *dev_id*,  *port_id*, or *op* is invalid.
2527  *
2528  * @see RTE_EVENT_DEV_CAP_MAINTENANCE_FREE
2529  */
2530 static inline int
2531 rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
2532 {
2533 	const struct rte_event_fp_ops *fp_ops;
2534 	void *port;
2535 
2536 	fp_ops = &rte_event_fp_ops[dev_id];
2537 	port = fp_ops->data[port_id];
2538 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2539 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2540 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
2541 		return -EINVAL;
2542 
2543 	if (port == NULL)
2544 		return -EINVAL;
2545 
2546 	if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
2547 		return -EINVAL;
2548 #endif
2549 	rte_eventdev_trace_maintain(dev_id, port_id, op);
2550 
2551 	if (fp_ops->maintain != NULL)
2552 		fp_ops->maintain(port, op);
2553 
2554 	return 0;
2555 }
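
/*
 * Illustrative sketch (not part of the API): keeping a port maintained while
 * a worker is idle on a device without RTE_EVENT_DEV_CAP_MAINTENANCE_FREE.
 * The paused flag is an application-defined placeholder.
 *
 *	while (paused) {
 *		rte_event_maintain(dev_id, port_id, 0);
 *		rte_pause();
 *	}
 */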
2556 
2557 /**
2558  * Change the active profile on an event port.
2559  *
2560  * This function is used to change the current active profile on an event port
2561  * when multiple link profiles are configured on an event port through the
2562  * function call ``rte_event_port_profile_links_set``.
2563  *
2564  * On the subsequent ``rte_event_dequeue_burst`` call, only the event queues
2565  * that were associated with the newly active profile will participate in
2566  * scheduling.
2567  *
2568  * @param dev_id
2569  *   The identifier of the device.
2570  * @param port_id
2571  *   The identifier of the event port.
2572  * @param profile_id
2573  *   The identifier of the profile.
2574  * @return
2575  *  - 0 on success.
2576  *  - -EINVAL if *dev_id*,  *port_id*, or *profile_id* is invalid.
2577  */
2578 static inline int
2579 rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
2580 {
2581 	const struct rte_event_fp_ops *fp_ops;
2582 	void *port;
2583 
2584 	fp_ops = &rte_event_fp_ops[dev_id];
2585 	port = fp_ops->data[port_id];
2586 
2587 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2588 	if (dev_id >= RTE_EVENT_MAX_DEVS ||
2589 	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
2590 		return -EINVAL;
2591 
2592 	if (port == NULL)
2593 		return -EINVAL;
2594 
2595 	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
2596 		return -EINVAL;
2597 #endif
2598 	rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);
2599 
2600 	return fp_ops->profile_switch(port, profile_id);
2601 }
2602 
2603 #ifdef __cplusplus
2604 }
2605 #endif
2606 
2607 #endif /* _RTE_EVENTDEV_H_ */
2608