/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2023 Marvell.
 */

#ifndef RTE_EVENT_DMA_ADAPTER
#define RTE_EVENT_DMA_ADAPTER

/**
 * @file rte_event_dma_adapter.h
 *
 * @warning
 * @b EXPERIMENTAL:
 * All functions in this file may be changed or removed without prior notice.
 *
 * DMA Event Adapter API.
 *
 * The eventdev library provides adapters that bridge between various components to provide new
 * event sources. The event DMA adapter is one such adapter; it is intended to bridge between
 * event devices and DMA devices.
 *
 * The DMA adapter adds support to enqueue / dequeue DMA operations to / from the event device. The
 * packet flow between the DMA device and the event device can be accomplished using both SW and HW
 * based transfer mechanisms. The adapter uses an EAL service core function for SW based packet
 * transfer and uses the eventdev PMD functions to configure HW based packet transfer between the
 * DMA device and the event device.
 *
 * The application can choose to submit a DMA operation directly to a DMA device or send it to the
 * DMA adapter via eventdev based on the RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability.
 * The first mode is known as the event new (RTE_EVENT_DMA_ADAPTER_OP_NEW) mode and the second as
 * the event forward (RTE_EVENT_DMA_ADAPTER_OP_FORWARD) mode. The choice of mode can be specified
 * while creating the adapter. In the former mode, it is the application's responsibility to enable
 * ingress packet ordering. In the latter mode, it is the adapter's responsibility to enable
 * ingress packet ordering.
 *
 *
 * Working model of RTE_EVENT_DMA_ADAPTER_OP_NEW mode:
 *
 *                +--------------+         +--------------+
 *                |              |         |   DMA stage  |
 *                | Application  |---[2]-->| + enqueue to |
 *                |              |         |     dmadev   |
 *                +--------------+         +--------------+
 *                    ^   ^                       |
 *                    |   |                      [3]
 *                   [6] [1]                      |
 *                    |   |                       |
 *                +--------------+                |
 *                |              |                |
 *                | Event device |                |
 *                |              |                |
 *                +--------------+                |
 *                       ^                        |
 *                       |                        |
 *                      [5]                       |
 *                       |                        v
 *                +--------------+         +--------------+
 *                |              |         |              |
 *                |  DMA adapter |<--[4]---|    dmadev    |
 *                |              |         |              |
 *                +--------------+         +--------------+
 *
 *
 *         [1] Application dequeues events from the previous stage.
 *         [2] Application prepares the DMA operations.
 *         [3] DMA operations are submitted to dmadev by application.
 *         [4] DMA adapter dequeues DMA completions from dmadev.
 *         [5] DMA adapter enqueues events to the eventdev.
 *         [6] Application dequeues from eventdev for further processing.
 *
 * In the RTE_EVENT_DMA_ADAPTER_OP_NEW mode, the application submits DMA operations directly to
 * the DMA device. The DMA adapter then dequeues DMA completions from the DMA device and enqueues
 * events to the event device. This mode does not ensure ingress ordering if the application
 * enqueues to the dmadev directly without going through a DMA / atomic stage, i.e. items [1] and
 * [2] are skipped.
 *
 * Events dequeued from the adapter will be treated as new events. In this mode, the application
 * needs to specify the event information (response information) required to enqueue an event
 * after the DMA operation is completed.
 *
 *
 * Working model of RTE_EVENT_DMA_ADAPTER_OP_FORWARD mode:
 *
 *                +--------------+         +--------------+
 *        --[1]-->|              |---[2]-->|  Application |
 *                | Event device |         |      in      |
 *        <--[8]--|              |<--[3]---| Ordered stage|
 *                +--------------+         +--------------+
 *                    ^      |
 *                    |     [4]
 *                   [7]     |
 *                    |      v
 *               +----------------+       +--------------+
 *               |                |--[5]->|              |
 *               |   DMA adapter  |       |     dmadev   |
 *               |                |<-[6]--|              |
 *               +----------------+       +--------------+
 *
 *
 *         [1] Events from the previous stage.
 *         [2] Application in ordered stage dequeues events from eventdev.
 *         [3] Application enqueues DMA operations as events to eventdev.
 *         [4] DMA adapter dequeues event from eventdev.
 *         [5] DMA adapter submits DMA operations to dmadev (Atomic stage).
 *         [6] DMA adapter dequeues DMA completions from dmadev.
 *         [7] DMA adapter enqueues events to the eventdev.
 *         [8] Events to the next stage.
 *
 * In the event forward (RTE_EVENT_DMA_ADAPTER_OP_FORWARD) mode, if the HW supports the capability
 * RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD, the application can directly submit the DMA
 * operations to the dmadev. If not, the application retrieves the event port of the DMA adapter
 * through the API rte_event_dma_adapter_event_port_get(), links its event queue to this port and
 * starts enqueuing DMA operations as events to the eventdev. The adapter then dequeues the events
 * and submits the DMA operations to the dmadev. After the DMA operations have completed, the
 * adapter enqueues events to the event device.
 *
 * The application can use this mode when ingress packet ordering is needed. Events dequeued from
 * the adapter will be treated as forwarded events. In this mode, the application needs to specify
 * the dmadev ID and vchan ID (request information) needed to enqueue a DMA operation in addition
 * to the event information (response information) needed to enqueue an event after the DMA
 * operation has completed.
 *
 * The event DMA adapter provides common APIs to configure the packet flow from the DMA device to
 * event devices for both SW and HW based transfers. The event DMA adapter's functions are:
 *
 *  - rte_event_dma_adapter_create_ext()
 *  - rte_event_dma_adapter_create()
 *  - rte_event_dma_adapter_free()
 *  - rte_event_dma_adapter_vchan_add()
 *  - rte_event_dma_adapter_vchan_del()
 *  - rte_event_dma_adapter_start()
 *  - rte_event_dma_adapter_stop()
 *  - rte_event_dma_adapter_stats_get()
 *  - rte_event_dma_adapter_stats_reset()
 *
 * The application creates an instance using rte_event_dma_adapter_create() or
 * rte_event_dma_adapter_create_ext().
 *
 * dmadev vchan addition / deletion is done using the rte_event_dma_adapter_vchan_add() /
 * rte_event_dma_adapter_vchan_del() APIs. If the HW supports the capability
 * RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND, event information must be passed to the
 * add API.
 *
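 * A minimal setup sketch is shown below (illustrative only; identifiers such as id, evdev_id,
 * dmadev_id, vchan, port_conf, post_dma_queue_id and resp_ev are application-defined placeholders,
 * and error handling is omitted). The adapter is created in forward mode, a dmadev vchan is bound
 * to it (the event argument is only needed when the VCHAN_EV_BIND capability is supported,
 * otherwise NULL may be passed), and the adapter is started after the eventdev and dmadev have
 * been started:
 *
 * @code{.c}
 * struct rte_event_port_conf port_conf = { .new_event_threshold = 4096,
 *					    .dequeue_depth = 32, .enqueue_depth = 32 };
 * struct rte_event resp_ev = { .queue_id = post_dma_queue_id,
 *				.sched_type = RTE_SCHED_TYPE_ATOMIC };
 *
 * rte_event_dma_adapter_create(id, evdev_id, &port_conf, RTE_EVENT_DMA_ADAPTER_OP_FORWARD);
 * rte_event_dma_adapter_vchan_add(id, dmadev_id, vchan, &resp_ev);
 * rte_event_dma_adapter_start(id);
 * @endcode
 *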
 */

#include <stdint.h>

#include <rte_common.h>
#include <rte_dmadev_pmd.h>
#include <rte_eventdev.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * A structure used to hold an event based DMA operation entry. All the information
 * required for a DMA transfer shall be populated in an instance of
 * struct rte_event_dma_adapter_op.
 */
struct rte_event_dma_adapter_op {
	uint64_t flags;
	/**< Flags related to the operation.
	 * @see RTE_DMA_OP_FLAG_*
	 */
	struct rte_mempool *op_mp;
	/**< Mempool from which the op is allocated. */
	enum rte_dma_status_code status;
	/**< Status code for this operation. */
	uint32_t rsvd;
	/**< Reserved for future use. */
	uint64_t impl_opaque[2];
	/**< Implementation-specific opaque data.
	 * A DMA device implementation uses this field to hold
	 * implementation-specific values shared between dequeue and enqueue
	 * operations.
	 * The application should not modify this field.
	 */
	uint64_t user_meta;
	/**< Memory to store user-specific metadata.
	 * The DMA device implementation should not modify this area.
	 */
	uint64_t event_meta;
	/**< Event metadata of the DMA completion event.
	 * Used when RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND is not
	 * supported in OP_NEW mode.
	 * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_NEW
	 * @see RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND
	 *
	 * Used when RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD is not
	 * supported in OP_FWD mode.
	 * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD
	 * @see RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD
	 *
	 * @see struct rte_event::event
	 */
	int16_t dma_dev_id;
	/**< DMA device ID to be used with OP_FORWARD mode.
	 * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD
	 */
	uint16_t vchan;
	/**< DMA vchan ID to be used with OP_FORWARD mode.
	 * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD
	 */
	uint16_t nb_src;
	/**< Number of source segments. */
	uint16_t nb_dst;
	/**< Number of destination segments. */
	struct rte_dma_sge src_dst_seg[];
	/**< Source and destination segments. */
};
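
/*
 * Illustrative sketch (not part of the API): populating a DMA adapter op for a single-segment
 * copy. The names op_mp, src_iova, dst_iova, len, dmadev_id and vchan are hypothetical,
 * application-defined values, and error handling is omitted. The op is assumed to be allocated
 * from a mempool whose element size leaves room for the source and destination rte_dma_sge
 * entries in src_dst_seg[]; event_meta may additionally carry the 64-bit event metadata
 * (see struct rte_event::event) when required by the mode and capabilities described above.
 *
 *	struct rte_event_dma_adapter_op *op;
 *
 *	rte_mempool_get(op_mp, (void **)&op);
 *
 *	op->op_mp = op_mp;
 *	op->flags = 0;
 *	op->dma_dev_id = dmadev_id;
 *	op->vchan = vchan;
 *	op->nb_src = 1;
 *	op->nb_dst = 1;
 *	op->src_dst_seg[0].addr = src_iova;
 *	op->src_dst_seg[0].length = len;
 *	op->src_dst_seg[1].addr = dst_iova;
 *	op->src_dst_seg[1].length = len;
 */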

/**
 *  DMA event adapter mode
 */
enum rte_event_dma_adapter_mode {
	RTE_EVENT_DMA_ADAPTER_OP_NEW,
	/**< Start the DMA adapter in event new mode.
	 * @see RTE_EVENT_OP_NEW.
	 *
	 * The application submits DMA operations to the dmadev. The adapter only dequeues the DMA
	 * completions from the dmadev and enqueues events to the eventdev.
	 */

	RTE_EVENT_DMA_ADAPTER_OP_FORWARD,
	/**< Start the DMA adapter in event forward mode.
	 * @see RTE_EVENT_OP_FORWARD.
	 *
	 * The application submits DMA requests as events to the DMA adapter or to the DMA device,
	 * based on the RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability. DMA completions
	 * are enqueued back to the eventdev by the DMA adapter.
	 */
};

/**
 * Adapter configuration structure that the adapter configuration callback function is expected to
 * fill out.
 *
 * @see rte_event_dma_adapter_conf_cb
 */
struct rte_event_dma_adapter_conf {
	uint8_t event_port_id;
	/**< Event port identifier; the adapter enqueues events to this port and dequeues DMA
	 * request events in RTE_EVENT_DMA_ADAPTER_OP_FORWARD mode.
	 */

	uint32_t max_nb;
	/**< The adapter can return early if it has processed at least max_nb DMA ops. This isn't
	 * treated as a requirement; batching may cause the adapter to process more than max_nb DMA
	 * ops.
	 */
};

/**
 * Adapter runtime configuration parameters
 */
struct rte_event_dma_adapter_runtime_params {
	uint32_t max_nb;
	/**< The adapter can return early if it has processed at least max_nb DMA ops. This isn't
	 * treated as a requirement; batching may cause the adapter to process more than max_nb DMA
	 * ops.
	 *
	 * The callback function passed to rte_event_dma_adapter_create_ext() configures the
	 * adapter with a default value of max_nb.
	 * rte_event_dma_adapter_runtime_params_set() allows max_nb to be reconfigured at runtime
	 * (after adding at least one vchan).
	 *
	 * This is valid for devices without the RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD or
	 * RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW capability.
	 */

	uint32_t rsvd[15];
	/**< Reserved fields for future expansion. */
};

/**
 * Function type used for the adapter configuration callback. The callback is used to fill in
 * members of struct rte_event_dma_adapter_conf and is invoked when creating a SW service for
 * packet transfer from a dmadev vchan to the event device. The SW service is created within the
 * rte_event_dma_adapter_vchan_add() function if SW based packet transfers from the dmadev vchan
 * to the event device are required.
 *
 * @param id
 *     Adapter identifier.
 * @param evdev_id
 *     Event device identifier.
 * @param conf
 *     Structure that needs to be populated by this callback.
 * @param arg
 *     Argument to the callback. This is the same as the conf_arg passed to
 * rte_event_dma_adapter_create_ext().
 */
typedef int (*rte_event_dma_adapter_conf_cb)(uint8_t id, uint8_t evdev_id,
					     struct rte_event_dma_adapter_conf *conf, void *arg);
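
/*
 * Illustrative sketch (not part of the API): a user-supplied configuration callback that fills in
 * the conf structure from values prepared by the application. Here, struct app_ctx and its
 * app_dma_port_id member are hypothetical; the port is assumed to have been set aside by the
 * application, whereas an alternative callback could instead reconfigure the event device with an
 * extra port. The callback is registered through rte_event_dma_adapter_create_ext() and conf_arg
 * is delivered back as arg. Error handling is omitted.
 *
 *	static int
 *	dma_adapter_conf_cb(uint8_t id, uint8_t evdev_id,
 *			    struct rte_event_dma_adapter_conf *conf, void *arg)
 *	{
 *		struct app_ctx *ctx = arg;
 *
 *		RTE_SET_USED(id);
 *		RTE_SET_USED(evdev_id);
 *
 *		conf->event_port_id = ctx->app_dma_port_id;
 *		conf->max_nb = 128;
 *
 *		return 0;
 *	}
 *
 *	rte_event_dma_adapter_create_ext(id, evdev_id, dma_adapter_conf_cb,
 *					 RTE_EVENT_DMA_ADAPTER_OP_FORWARD, ctx);
 */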

/**
 * A structure used to retrieve statistics for an event DMA adapter instance.
 */
struct rte_event_dma_adapter_stats {
	uint64_t event_poll_count;
	/**< Event port poll count */

	uint64_t event_deq_count;
	/**< Event dequeue count */

	uint64_t dma_enq_count;
	/**< dmadev enqueue count */

	uint64_t dma_enq_fail_count;
	/**< dmadev enqueue failed count */

	uint64_t dma_deq_count;
	/**< dmadev dequeue count */

	uint64_t event_enq_count;
	/**< Event enqueue count */

	uint64_t event_enq_retry_count;
	/**< Event enqueue retry count */

	uint64_t event_enq_fail_count;
	/**< Event enqueue fail count */
};

/**
 * Create a new event DMA adapter with the specified identifier.
 *
 * @param id
 *     Adapter identifier.
 * @param evdev_id
 *     Event device identifier.
 * @param conf_cb
 *     Callback function that fills in members of the struct rte_event_dma_adapter_conf passed
 * to it.
 * @param mode
 *     Flag to indicate the mode of the adapter.
 *     @see rte_event_dma_adapter_mode
 * @param conf_arg
 *     Argument that is passed to the conf_cb function.
 *
 * @return
 *     - 0: Success
 *     - <0: Error code on failure
 */
__rte_experimental
int rte_event_dma_adapter_create_ext(uint8_t id, uint8_t evdev_id,
				     rte_event_dma_adapter_conf_cb conf_cb,
				     enum rte_event_dma_adapter_mode mode, void *conf_arg);

/**
 * Create a new event DMA adapter with the specified identifier. This function uses an internal
 * configuration function that creates an event port. This default function reconfigures the event
 * device with an additional event port and sets up the event port using the port_config parameter
 * passed into this function. In case the application needs more control over the configuration of
 * the service, it should use the rte_event_dma_adapter_create_ext() version.
 *
 * @param id
 *     Adapter identifier.
 * @param evdev_id
 *     Event device identifier.
 * @param port_config
 *     Argument of type *rte_event_port_conf* that is passed to the conf_cb function.
 * @param mode
 *     Flag to indicate the mode of the adapter.
 *     @see rte_event_dma_adapter_mode
 *
 * @return
 *     - 0: Success
 *     - <0: Error code on failure
 */
__rte_experimental
int rte_event_dma_adapter_create(uint8_t id, uint8_t evdev_id,
				 struct rte_event_port_conf *port_config,
				 enum rte_event_dma_adapter_mode mode);

/**
 * Free an event DMA adapter
 *
 * @param id
 *     Adapter identifier.
 * @return
 *     - 0: Success
 *     - <0: Error code on failure. If the adapter still has vchans added to it, the function
 * returns -EBUSY.
 */
__rte_experimental
int rte_event_dma_adapter_free(uint8_t id);

/**
 * Retrieve the event port of an adapter.
 *
 * @param id
 *     Adapter identifier.
 *
 * @param [out] event_port_id
 *     Event port identifier of the adapter. In RTE_EVENT_DMA_ADAPTER_OP_FORWARD mode, the
 * application links its event queue to this port.
 *
 * @return
 *     - 0: Success
 *     - <0: Error code on failure.
 */
__rte_experimental
int rte_event_dma_adapter_event_port_get(uint8_t id, uint8_t *event_port_id);
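
/*
 * Illustrative sketch (not part of the API): in OP_FORWARD mode without the INTERNAL_PORT_OP_FWD
 * capability, the application retrieves the adapter's event port and links the event queue that
 * carries DMA request events to it. The identifiers id, evdev_id and dma_req_queue_id are
 * hypothetical application values; rte_event_port_link() is the standard eventdev linking API and
 * error handling is omitted.
 *
 *	uint8_t dma_ev_port;
 *
 *	rte_event_dma_adapter_event_port_get(id, &dma_ev_port);
 *	rte_event_port_link(evdev_id, dma_ev_port, &dma_req_queue_id, NULL, 1);
 */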

/**
 * Add a vchan to an event DMA adapter.
 *
 * @param id
 *     Adapter identifier.
 * @param dmadev_id
 *     dmadev identifier.
 * @param vchan
 *     DMA device vchan identifier. If vchan is set to -1, the adapter adds all the
 * pre-configured vchans to the instance.
 * @param event
 *     If the HW supports dmadev vchan to event queue binding, the application is expected to
 * fill in event information; otherwise it shall be NULL.
 *     @see RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND
 *
 * @return
 *     - 0: Success, vchan added correctly.
 *     - <0: Error code on failure.
 */
__rte_experimental
int rte_event_dma_adapter_vchan_add(uint8_t id, int16_t dmadev_id, uint16_t vchan,
				    const struct rte_event *event);
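
/*
 * Illustrative sketch (not part of the API): checking the adapter capabilities before adding a
 * vchan. rte_event_dma_adapter_caps_get() is assumed to be the capability query declared in
 * rte_eventdev.h; id, evdev_id, dmadev_id, vchan and resp_ev are hypothetical application values
 * and error handling is omitted.
 *
 *	uint32_t caps = 0;
 *
 *	rte_event_dma_adapter_caps_get(evdev_id, dmadev_id, &caps);
 *
 *	if (caps & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND)
 *		rte_event_dma_adapter_vchan_add(id, dmadev_id, vchan, &resp_ev);
 *	else
 *		rte_event_dma_adapter_vchan_add(id, dmadev_id, vchan, NULL);
 */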

/**
 * Delete a vchan from an event DMA adapter.
 *
 * @param id
 *     Adapter identifier.
 * @param dmadev_id
 *     DMA device identifier.
 * @param vchan
 *     DMA device vchan identifier.
 *
 * @return
 *     - 0: Success, vchan deleted successfully.
 *     - <0: Error code on failure.
 */
__rte_experimental
int rte_event_dma_adapter_vchan_del(uint8_t id, int16_t dmadev_id, uint16_t vchan);

/**
 * Retrieve the service ID of an adapter. If the adapter does not use an rte_service function,
 * this function returns -ESRCH.
 *
 * @param id
 *     Adapter identifier.
 * @param [out] service_id
 *     A pointer to a uint32_t, to be filled in with the service id.
 *
 * @return
 *     - 0: Success
 *     - <0: Error code on failure. -ESRCH is returned if the adapter does not use an rte_service
 * function.
 */
__rte_experimental
int rte_event_dma_adapter_service_id_get(uint8_t id, uint32_t *service_id);
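
/*
 * Illustrative sketch (not part of the API): when the adapter uses a software service, the
 * application maps that service to a service lcore and enables it. rte_service_map_lcore_set()
 * and rte_service_runstate_set() are the standard rte_service APIs; id and service_lcore_id are
 * hypothetical application values and error handling is omitted.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dma_adapter_service_id_get(id, &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, service_lcore_id, 1);
 *		rte_service_runstate_set(service_id, 1);
 *	}
 */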

/**
 * Start event DMA adapter
 *
 * @param id
 *     Adapter identifier.
 *
 * @return
 *     - 0: Success, adapter started successfully.
 *     - <0: Error code on failure.
 *
 * @note The eventdev and dmadev to which the event_dma_adapter is connected should be started
 * before calling rte_event_dma_adapter_start().
 */
__rte_experimental
int rte_event_dma_adapter_start(uint8_t id);

/**
 * Stop event DMA adapter
 *
 * @param id
 *  Adapter identifier.
 *
 * @return
 *  - 0: Success, adapter stopped successfully.
 *  - <0: Error code on failure.
 */
__rte_experimental
int rte_event_dma_adapter_stop(uint8_t id);

/**
 * Initialize the adapter runtime configuration parameters
 *
 * @param params
 *  A pointer to a structure of type struct rte_event_dma_adapter_runtime_params.
 *
 * @return
 *  -  0: Success
 *  - <0: Error code on failure
 */
__rte_experimental
int rte_event_dma_adapter_runtime_params_init(struct rte_event_dma_adapter_runtime_params *params);

/**
 * Set the adapter runtime configuration parameters
 *
 * @param id
 *  Adapter identifier
 *
 * @param params
 *  A pointer to a structure of type struct rte_event_dma_adapter_runtime_params with configuration
 * parameter values. The reserved fields of this structure must be initialized to zero and the
 * valid fields set appropriately. This struct can be initialized to default values using the
 * rte_event_dma_adapter_runtime_params_init() API, or the application may reset the struct and
 * update the required fields.
 *
 * @return
 *  -  0: Success
 *  - <0: Error code on failure
 */
__rte_experimental
int rte_event_dma_adapter_runtime_params_set(uint8_t id,
					     struct rte_event_dma_adapter_runtime_params *params);
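
/*
 * Illustrative sketch (not part of the API): adjusting max_nb at runtime. The params structure is
 * first initialized to defaults, then only the field of interest is updated; id is a hypothetical
 * adapter identifier and error handling is omitted.
 *
 *	struct rte_event_dma_adapter_runtime_params params;
 *
 *	rte_event_dma_adapter_runtime_params_init(&params);
 *	params.max_nb = 256;
 *	rte_event_dma_adapter_runtime_params_set(id, &params);
 */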

/**
 * Get the adapter runtime configuration parameters
 *
 * @param id
 *  Adapter identifier
 *
 * @param[out] params
 *  A pointer to a structure of type struct rte_event_dma_adapter_runtime_params containing valid
 * adapter parameters when the return value is 0.
 *
 * @return
 *  -  0: Success
 *  - <0: Error code on failure
 */
__rte_experimental
int rte_event_dma_adapter_runtime_params_get(uint8_t id,
					     struct rte_event_dma_adapter_runtime_params *params);

/**
 * Retrieve statistics for an adapter
 *
 * @param id
 *     Adapter identifier.
 * @param [out] stats
 *     A pointer to a structure used to retrieve statistics for an adapter.
 *
 * @return
 *     - 0: Success, retrieved successfully.
 *     - <0: Error code on failure.
 */
__rte_experimental
int rte_event_dma_adapter_stats_get(uint8_t id, struct rte_event_dma_adapter_stats *stats);
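
/*
 * Illustrative sketch (not part of the API): polling the adapter statistics. id is a hypothetical
 * adapter identifier; <stdio.h> and <inttypes.h> are assumed and error handling is omitted.
 *
 *	struct rte_event_dma_adapter_stats stats;
 *
 *	rte_event_dma_adapter_stats_get(id, &stats);
 *	printf("dma enq %" PRIu64 " deq %" PRIu64 " event enq %" PRIu64 "\n",
 *	       stats.dma_enq_count, stats.dma_deq_count, stats.event_enq_count);
 */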

/**
 * Enqueue a burst of DMA operations as event objects supplied in *rte_event* structures on an
 * event DMA adapter designated by its event device identifier *evdev_id* through the event port
 * specified by *port_id*. This function is supported if the eventdev PMD has the
 * #RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability flag set.
 *
 * The *nb_events* parameter is the number of event objects to enqueue that are supplied in the
 * *ev* array of *rte_event* structures.
 *
 * The rte_event_dma_adapter_enqueue() function returns the number of event objects it actually
 * enqueued. A return value equal to *nb_events* means that all event objects have been enqueued.
 *
 * @param evdev_id
 *     The identifier of the device.
 * @param port_id
 *     The identifier of the event port.
 * @param ev
 *     Points to an array of *nb_events* objects of type *rte_event* structure which contain the
 * event object enqueue operations to be processed.
 * @param nb_events
 *     The number of event objects to enqueue, typically up to the enqueue depth reported by
 * rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...) for this port.
 *
 * @return
 *     The number of event objects actually enqueued on the event device. The return value can be
 * less than the value of the *nb_events* parameter when the event device's queue is full or if
 * invalid parameters are specified in a *rte_event*. If the return value is less than *nb_events*,
 * the remaining events at the end of ev[] are not consumed and the caller has to take care of
 * them, and rte_errno is set accordingly. Possible errno values include:
 *     - EINVAL: The port ID is invalid, device ID is invalid, an event's queue ID is invalid, or an
 * event's sched type doesn't match the capabilities of the destination queue.
 *     - ENOSPC: The event port was backpressured and unable to enqueue one or more events. This
 * error code is only applicable to closed systems.
 */
__rte_experimental
uint16_t rte_event_dma_adapter_enqueue(uint8_t evdev_id, uint8_t port_id, struct rte_event ev[],
				       uint16_t nb_events);
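
/*
 * Illustrative sketch (not part of the API): submitting a prepared rte_event_dma_adapter_op as a
 * forwarded event through the adapter's internal port when the INTERNAL_PORT_OP_FWD capability is
 * present. The op pointer is carried in the event's event_ptr field; evdev_id, port_id, op and
 * dma_queue_id are hypothetical application values, RTE_EVENT_TYPE_DMADEV is assumed to be the
 * event type defined for dmadev events in rte_eventdev.h, and error handling is omitted.
 *
 *	struct rte_event ev = { 0 };
 *
 *	ev.event_ptr = op;
 *	ev.queue_id = dma_queue_id;
 *	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	ev.event_type = RTE_EVENT_TYPE_DMADEV;
 *	ev.op = RTE_EVENT_OP_FORWARD;
 *
 *	rte_event_dma_adapter_enqueue(evdev_id, port_id, &ev, 1);
 */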

#ifdef __cplusplus
}
#endif

#endif /* RTE_EVENT_DMA_ADAPTER */