xref: /dpdk/lib/dmadev/rte_dmadev.h (revision 42a8fc7daa46256d150278fc9a7a846e27945a0c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2021 HiSilicon Limited
3  * Copyright(c) 2021 Intel Corporation
4  * Copyright(c) 2021 Marvell International Ltd
5  * Copyright(c) 2021 SmartShare Systems
6  */
7 
8 #ifndef RTE_DMADEV_H
9 #define RTE_DMADEV_H
10 
11 /**
12  * @file rte_dmadev.h
13  *
14  * DMA (Direct Memory Access) device API.
15  *
16  * The DMA framework is built on the following model:
17  *
18  *     ---------------   ---------------       ---------------
19  *     | virtual DMA |   | virtual DMA |       | virtual DMA |
20  *     | channel     |   | channel     |       | channel     |
21  *     ---------------   ---------------       ---------------
22  *            |                |                      |
23  *            ------------------                      |
24  *                     |                              |
25  *               ------------                    ------------
26  *               |  dmadev  |                    |  dmadev  |
27  *               ------------                    ------------
28  *                     |                              |
29  *            ------------------               ------------------
30  *            | HW DMA channel |               | HW DMA channel |
31  *            ------------------               ------------------
32  *                     |                              |
33  *                     --------------------------------
34  *                                     |
35  *                           ---------------------
36  *                           | HW DMA Controller |
37  *                           ---------------------
38  *
39  * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues),
40  * each HW-DMA-channel should be represented by a dmadev.
41  *
42  * The dmadev could create multiple virtual DMA channels, each virtual DMA
43  * channel represents a different transfer context. The DMA operation request
44  * must be submitted to the virtual DMA channel. e.g. Application could create
45  * virtual DMA channel 0 for memory-to-memory transfer scenario, and create
46  * virtual DMA channel 1 for memory-to-device transfer scenario.
47  *
48  * This framework uses 'int16_t dev_id' as the device identifier of a dmadev,
49  * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
50  *
51  * The functions exported by the dmadev API to setup a device designated by its
52  * device identifier must be invoked in the following order:
53  *     - rte_dma_configure()
54  *     - rte_dma_vchan_setup()
55  *     - rte_dma_start()
56  *
57  * Then, the application can invoke dataplane functions to process jobs.
58  *
59  * If the application wants to change the configuration (i.e. invoke
60  * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
61  * rte_dma_stop() first to stop the device and then do the reconfiguration
62  * before invoking rte_dma_start() again. The dataplane functions should not
63  * be invoked when the device is stopped.
64  *
65  * Finally, an application can close a dmadev by invoking the rte_dma_close()
66  * function.
67  *
68  * The dataplane APIs include two parts:
69  * The first part is the submission of operation requests:
70  *     - rte_dma_copy()
71  *     - rte_dma_copy_sg()
72  *     - rte_dma_fill()
73  *     - rte_dma_submit()
74  *
75  * These APIs could work with different virtual DMA channels which have
76  * different contexts.
77  *
78  * The first three APIs are used to submit the operation request to the virtual
79  * DMA channel, if the submission is successful, a positive
80  * ring_idx <= UINT16_MAX is returned, otherwise a negative number is returned.
81  *
 * The last API is used to issue a doorbell to the hardware; alternatively,
 * the flags parameter (@see RTE_DMA_OP_FLAG_SUBMIT) of the first three APIs
 * can be used to achieve the same effect.
85  * @note When enqueuing a set of jobs to the device, having a separate submit
86  * outside a loop makes for clearer code than having a check for the last
87  * iteration inside the loop to set a special submit flag.  However, for cases
88  * where one item alone is to be submitted or there is a small set of jobs to
89  * be submitted sequentially, having a submit flag provides a lower-overhead
90  * way of doing the submission while still keeping the code clean.
91  *
92  * The second part is to obtain the result of requests:
93  *     - rte_dma_completed()
94  *         - return the number of operation requests completed successfully.
95  *     - rte_dma_completed_status()
96  *         - return the number of operation requests completed.
97  *
98  * @note If the dmadev works in silent mode (@see RTE_DMA_CAPA_SILENT),
99  * application does not invoke the above two completed APIs.
100  *
101  * About the ring_idx which enqueue APIs (e.g. rte_dma_copy(), rte_dma_fill())
102  * return, the rules are as follows:
103  *     - ring_idx for each virtual DMA channel are independent.
 *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
 *       when it reaches UINT16_MAX, it wraps back to zero.
106  *     - This ring_idx can be used by applications to track per-operation
107  *       metadata in an application-defined circular ring.
108  *     - The initial ring_idx of a virtual DMA channel is zero, after the
109  *       device is stopped, the ring_idx needs to be reset to zero.
110  *
111  * One example:
112  *     - step-1: start one dmadev
113  *     - step-2: enqueue a copy operation, the ring_idx return is 0
114  *     - step-3: enqueue a copy operation again, the ring_idx return is 1
115  *     - ...
116  *     - step-101: stop the dmadev
117  *     - step-102: start the dmadev
118  *     - step-103: enqueue a copy operation, the ring_idx return is 0
119  *     - ...
120  *     - step-x+0: enqueue a fill operation, the ring_idx return is 65535
121  *     - step-x+1: enqueue a copy operation, the ring_idx return is 0
122  *     - ...
123  *
124  * The DMA operation address used in enqueue APIs (i.e. rte_dma_copy(),
125  * rte_dma_copy_sg(), rte_dma_fill()) is defined as rte_iova_t type.
126  *
127  * The dmadev supports two types of address: memory address and device address.
128  *
129  * - memory address: the source and destination address of the memory-to-memory
130  * transfer type, or the source address of the memory-to-device transfer type,
131  * or the destination address of the device-to-memory transfer type.
 * @note If the device supports SVA (@see RTE_DMA_CAPA_SVA), the memory address
133  * can be any VA address, otherwise it must be an IOVA address.
134  *
135  * - device address: the source and destination address of the device-to-device
136  * transfer type, or the source address of the device-to-memory transfer type,
137  * or the destination address of the memory-to-device transfer type.
138  *
 * About MT-safety: all the dmadev API functions implemented by a PMD are
 * lock-free and are assumed not to be invoked in parallel on different
 * logical cores to work on the same target dmadev object.
142  * @note Different virtual DMA channels on the same dmadev *DO NOT* support
143  * parallel invocation because these virtual DMA channels share the same
144  * HW-DMA-channel.
145  */
146 
147 #include <stdint.h>
148 
149 #include <rte_bitops.h>
150 #include <rte_common.h>
151 #include <rte_compat.h>
152 
153 #ifdef __cplusplus
154 extern "C" {
155 #endif
156 
157 /** Maximum number of devices if rte_dma_dev_max() is not called. */
158 #define RTE_DMADEV_DEFAULT_MAX 64
159 
160 /**
161  * @warning
162  * @b EXPERIMENTAL: this API may change without prior notice.
163  *
164  * Configure the maximum number of dmadevs.
165  * @note This function can be invoked before the primary process rte_eal_init()
166  * to change the maximum number of dmadevs. If not invoked, the maximum number
167  * of dmadevs is @see RTE_DMADEV_DEFAULT_MAX
168  *
169  * @param dev_max
170  *   maximum number of dmadevs.
171  *
172  * @return
173  *   0 on success. Otherwise negative value is returned.
174  */
175 __rte_experimental
176 int rte_dma_dev_max(size_t dev_max);
177 
178 /**
179  * @warning
180  * @b EXPERIMENTAL: this API may change without prior notice.
181  *
182  * Get the device identifier for the named DMA device.
183  *
184  * @param name
185  *   DMA device name.
186  *
187  * @return
188  *   Returns DMA device identifier on success.
189  *   - <0: Failure to find named DMA device.
190  */
191 __rte_experimental
192 int rte_dma_get_dev_id_by_name(const char *name);
193 
194 /**
195  * @warning
196  * @b EXPERIMENTAL: this API may change without prior notice.
197  *
198  * Check whether the dev_id is valid.
199  *
200  * @param dev_id
201  *   DMA device index.
202  *
203  * @return
204  *   - If the device index is valid (true) or not (false).
205  */
206 __rte_experimental
207 bool rte_dma_is_valid(int16_t dev_id);
208 
209 /**
210  * @warning
211  * @b EXPERIMENTAL: this API may change without prior notice.
212  *
213  * Get the total number of DMA devices that have been successfully
214  * initialised.
215  *
216  * @return
217  *   The total number of usable DMA devices.
218  */
219 __rte_experimental
220 uint16_t rte_dma_count_avail(void);
221 
222 /**
223  * Iterates over valid dmadev instances.
224  *
225  * @param start_dev_id
226  *   The id of the next possible dmadev.
227  * @return
228  *   Next valid dmadev, UINT16_MAX if there is none.
229  */
230 __rte_experimental
231 int16_t rte_dma_next_dev(int16_t start_dev_id);
232 
233 /** Utility macro to iterate over all available dmadevs */
234 #define RTE_DMA_FOREACH_DEV(p) \
235 	for (p = rte_dma_next_dev(0); \
236 	     p != -1; \
237 	     p = rte_dma_next_dev(p + 1))
238 
239 
240 /**@{@name DMA capability
241  * @see struct rte_dma_info::dev_capa
242  */
243 /** Support memory-to-memory transfer */
244 #define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
245 /** Support memory-to-device transfer. */
246 #define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)
247 /** Support device-to-memory transfer. */
248 #define RTE_DMA_CAPA_DEV_TO_MEM		RTE_BIT64(2)
249 /** Support device-to-device transfer. */
250 #define RTE_DMA_CAPA_DEV_TO_DEV		RTE_BIT64(3)
/** Support SVA, which allows using a VA as the DMA address.
 * If the device supports SVA, then the application can pass any VA address,
 * e.g. memory from rte_malloc(), rte_memzone(), malloc, or stack memory.
 * If the device does not support SVA, then the application must pass an IOVA
 * address obtained from rte_malloc() or rte_memzone().
 */
257 #define RTE_DMA_CAPA_SVA                RTE_BIT64(4)
/** Support working in silent mode.
 * In this mode, the application is not required to invoke the
 * rte_dma_completed*() APIs.
 * @see struct rte_dma_conf::enable_silent
 */
263 #define RTE_DMA_CAPA_SILENT             RTE_BIT64(5)
264 /** Supports error handling
265  *
266  * With this bit set, invalid input addresses will be reported as operation failures
267  * to the user but other operations can continue.
268  * Without this bit set, invalid data is not handled by either HW or driver, so user
269  * must ensure that all memory addresses are valid and accessible by HW.
270  */
271 #define RTE_DMA_CAPA_HANDLES_ERRORS	RTE_BIT64(6)
272 /** Support copy operation.
273  * This capability start with index of 32, so that it could leave gap between
274  * normal capability and ops capability.
275  */
276 #define RTE_DMA_CAPA_OPS_COPY           RTE_BIT64(32)
277 /** Support scatter-gather list copy operation. */
278 #define RTE_DMA_CAPA_OPS_COPY_SG	RTE_BIT64(33)
279 /** Support fill operation. */
280 #define RTE_DMA_CAPA_OPS_FILL		RTE_BIT64(34)
281 /**@}*/
282 
/**
 * A structure used to retrieve the information of a DMA device.
 *
 * @see rte_dma_info_get
 */
struct rte_dma_info {
	const char *dev_name; /**< Unique device name. */
	/** Device capabilities (RTE_DMA_CAPA_*). */
	uint64_t dev_capa;
	/** Maximum number of virtual DMA channels supported. */
	uint16_t max_vchans;
	/** Maximum allowed number of virtual DMA channel descriptors. */
	uint16_t max_desc;
	/** Minimum allowed number of virtual DMA channel descriptors. */
	uint16_t min_desc;
	/** Maximum number of source or destination scatter-gather entries
	 * supported.
	 * If the device does not support the COPY_SG capability, this value
	 * can be zero.
	 * If the device supports the COPY_SG capability, then the
	 * rte_dma_copy_sg() parameters nb_src/nb_dst should not exceed this
	 * value.
	 */
	uint16_t max_sges;
	/** NUMA node connection, -1 if unknown. */
	int16_t numa_node;
	/** Number of virtual DMA channels configured. */
	uint16_t nb_vchans;
};
311 
312 /**
313  * @warning
314  * @b EXPERIMENTAL: this API may change without prior notice.
315  *
316  * Retrieve information of a DMA device.
317  *
318  * @param dev_id
319  *   The identifier of the device.
320  * @param[out] dev_info
321  *   A pointer to a structure of type *rte_dma_info* to be filled with the
322  *   information of the device.
323  *
324  * @return
325  *   0 on success. Otherwise negative value is returned.
326  */
327 __rte_experimental
328 int rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info);
329 
/**
 * A structure used to configure a DMA device.
 *
 * @see rte_dma_configure
 */
struct rte_dma_conf {
	/** The number of virtual DMA channels to set up for the DMA device.
	 * This value cannot be greater than the field 'max_vchans' of struct
	 * rte_dma_info obtained from rte_dma_info_get().
	 */
	uint16_t nb_vchans;
	/** Indicates whether to enable silent mode.
	 * false - default mode, true - silent mode.
	 * This value can be set to true only when the SILENT capability is
	 * supported.
	 *
	 * @see RTE_DMA_CAPA_SILENT
	 */
	bool enable_silent;
};
350 
351 /**
352  * @warning
353  * @b EXPERIMENTAL: this API may change without prior notice.
354  *
355  * Configure a DMA device.
356  *
357  * This function must be invoked first before any other function in the
358  * API. This function can also be re-invoked when a device is in the
359  * stopped state.
360  *
361  * @param dev_id
362  *   The identifier of the device to configure.
363  * @param dev_conf
364  *   The DMA device configuration structure encapsulated into rte_dma_conf
365  *   object.
366  *
367  * @return
368  *   0 on success. Otherwise negative value is returned.
369  */
370 __rte_experimental
371 int rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf);
372 
373 /**
374  * @warning
375  * @b EXPERIMENTAL: this API may change without prior notice.
376  *
377  * Start a DMA device.
378  *
379  * The device start step is the last one and consists of setting the DMA
380  * to start accepting jobs.
381  *
382  * @param dev_id
383  *   The identifier of the device.
384  *
385  * @return
386  *   0 on success. Otherwise negative value is returned.
387  */
388 __rte_experimental
389 int rte_dma_start(int16_t dev_id);
390 
391 /**
392  * @warning
393  * @b EXPERIMENTAL: this API may change without prior notice.
394  *
395  * Stop a DMA device.
396  *
397  * The device can be restarted with a call to rte_dma_start().
398  *
399  * @param dev_id
400  *   The identifier of the device.
401  *
402  * @return
403  *   0 on success. Otherwise negative value is returned.
404  */
405 __rte_experimental
406 int rte_dma_stop(int16_t dev_id);
407 
408 /**
409  * @warning
410  * @b EXPERIMENTAL: this API may change without prior notice.
411  *
412  * Close a DMA device.
413  *
414  * The device cannot be restarted after this call.
415  *
416  * @param dev_id
417  *   The identifier of the device.
418  *
419  * @return
420  *   0 on success. Otherwise negative value is returned.
421  */
422 __rte_experimental
423 int rte_dma_close(int16_t dev_id);
424 
/**
 * DMA transfer direction defines.
 *
 * @see struct rte_dma_vchan_conf::direction
 */
enum rte_dma_direction {
	/** DMA transfer direction - from memory to memory.
	 *
	 * @see struct rte_dma_vchan_conf::direction
	 */
	RTE_DMA_DIR_MEM_TO_MEM,
	/** DMA transfer direction - from memory to device.
	 * In a typical scenario, SoCs are installed on host servers as iNICs
	 * through the PCIe interface. In this case, the SoC works in
	 * EP (endpoint) mode, and it could initiate a DMA move request from
	 * memory (which is SoC memory) to device (which is host memory).
	 *
	 * @see struct rte_dma_vchan_conf::direction
	 */
	RTE_DMA_DIR_MEM_TO_DEV,
	/** DMA transfer direction - from device to memory.
	 * In a typical scenario, SoCs are installed on host servers as iNICs
	 * through the PCIe interface. In this case, the SoC works in
	 * EP (endpoint) mode, and it could initiate a DMA move request from
	 * device (which is host memory) to memory (which is SoC memory).
	 *
	 * @see struct rte_dma_vchan_conf::direction
	 */
	RTE_DMA_DIR_DEV_TO_MEM,
	/** DMA transfer direction - from device to device.
	 * In a typical scenario, SoCs are installed on host servers as iNICs
	 * through the PCIe interface. In this case, the SoC works in
	 * EP (endpoint) mode, and it could initiate a DMA move request from
	 * device (which is host memory) to the device (which is another host
	 * memory).
	 *
	 * @see struct rte_dma_vchan_conf::direction
	 */
	RTE_DMA_DIR_DEV_TO_DEV,
};
464 
/**
 * DMA access port type defines.
 *
 * @see struct rte_dma_port_param::port_type
 */
enum rte_dma_port_type {
	RTE_DMA_PORT_NONE, /**< No access port parameters apply. */
	RTE_DMA_PORT_PCIE, /**< The DMA access port is PCIe. */
};
474 
/**
 * A structure used to describe DMA access port parameters.
 *
 * @see struct rte_dma_vchan_conf::src_port
 * @see struct rte_dma_vchan_conf::dst_port
 */
struct rte_dma_port_param {
	/** The device access port type.
	 *
	 * @see enum rte_dma_port_type
	 */
	enum rte_dma_port_type port_type;
	RTE_STD_C11
	union {
		/** PCIe access port parameters.
		 *
		 * The following model shows a SoC's PCIe module connecting to
		 * multiple PCIe hosts and multiple endpoints. The PCIe module
		 * has an integrated DMA controller.
		 *
		 * If the DMA wants to access the memory of host A, it can be
		 * initiated by PF1 in core0, or by VF0 of PF0 in core0.
		 *
		 * \code{.unparsed}
		 * System Bus
		 *    |     ----------PCIe module----------
		 *    |     Bus
		 *    |     Interface
		 *    |     -----        ------------------
		 *    |     |   |        | PCIe Core0     |
		 *    |     |   |        |                |        -----------
		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
		 *    |     |   |--------|        |- VF-1 |--------| Root    |
		 *    |     |   |        |   PF-1         |        | Complex |
		 *    |     |   |        |   PF-2         |        -----------
		 *    |     |   |        ------------------
		 *    |     |   |
		 *    |     |   |        ------------------
		 *    |     |   |        | PCIe Core1     |
		 *    |     |   |        |                |        -----------
		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
		 *    |     |   |        |        |- VF-1 |        | Complex |
		 *    |     |   |        |   PF-2         |        -----------
		 *    |     |   |        ------------------
		 *    |     |   |
		 *    |     |   |        ------------------
		 *    |     |DMA|        |                |        ------
		 *    |     |   |        |                |--------| EP |
		 *    |     |   |--------| PCIe Core2     |        ------
		 *    |     |   |        |                |        ------
		 *    |     |   |        |                |--------| EP |
		 *    |     |   |        |                |        ------
		 *    |     -----        ------------------
		 *
		 * \endcode
		 *
		 * @note If some fields cannot be supported by the
		 * hardware/driver, then the driver ignores those fields.
		 * Please check driver-specific documentation for limitations
		 * and capabilities.
		 */
		__extension__
		struct {
			uint64_t coreid : 4; /**< PCIe core id used. */
			uint64_t pfid : 8; /**< PF id used. */
			uint64_t vfen : 1; /**< VF enable bit. */
			uint64_t vfid : 16; /**< VF id used. */
			/** The pasid field in the TLP packet. */
			uint64_t pasid : 20;
			/** The attributes field in the TLP packet. */
			uint64_t attr : 3;
			/** The processing hint field in the TLP packet. */
			uint64_t ph : 2;
			/** The steering tag field in the TLP packet. */
			uint64_t st : 16;
		} pcie;
	};
	uint64_t reserved[2]; /**< Reserved for future fields. */
};
555 
/**
 * A structure used to configure a virtual DMA channel.
 *
 * @see rte_dma_vchan_setup
 */
struct rte_dma_vchan_conf {
	/** Transfer direction.
	 *
	 * @see enum rte_dma_direction
	 */
	enum rte_dma_direction direction;
	/** Number of descriptors for the virtual DMA channel. */
	uint16_t nb_desc;
	/** 1) Used to describe the device access port parameter in the
	 * device-to-memory transfer scenario.
	 * 2) Used to describe the source device access port parameter in the
	 * device-to-device transfer scenario.
	 *
	 * @see struct rte_dma_port_param
	 */
	struct rte_dma_port_param src_port;
	/** 1) Used to describe the device access port parameter in the
	 * memory-to-device transfer scenario.
	 * 2) Used to describe the destination device access port parameter in
	 * the device-to-device transfer scenario.
	 *
	 * @see struct rte_dma_port_param
	 */
	struct rte_dma_port_param dst_port;
};
586 
587 /**
588  * @warning
589  * @b EXPERIMENTAL: this API may change without prior notice.
590  *
591  * Allocate and set up a virtual DMA channel.
592  *
593  * @param dev_id
594  *   The identifier of the device.
595  * @param vchan
596  *   The identifier of virtual DMA channel. The value must be in the range
597  *   [0, nb_vchans - 1] previously supplied to rte_dma_configure().
598  * @param conf
599  *   The virtual DMA channel configuration structure encapsulated into
600  *   rte_dma_vchan_conf object.
601  *
602  * @return
603  *   0 on success. Otherwise negative value is returned.
604  */
605 __rte_experimental
606 int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
607 			const struct rte_dma_vchan_conf *conf);
608 
/**
 * A structure used to retrieve statistics.
 *
 * @see rte_dma_stats_get
 */
struct rte_dma_stats {
	/** Count of operations which were submitted to the hardware. */
	uint64_t submitted;
	/** Count of operations which were completed, including both
	 * successful and failed completions.
	 */
	uint64_t completed;
	/** Count of operations which failed to complete. */
	uint64_t errors;
};
624 
625 /**
626  * Special ID, which is used to represent all virtual DMA channels.
627  *
628  * @see rte_dma_stats_get
629  * @see rte_dma_stats_reset
630  */
631 #define RTE_DMA_ALL_VCHAN	0xFFFFu
632 
633 /**
634  * @warning
635  * @b EXPERIMENTAL: this API may change without prior notice.
636  *
 * Retrieve basic statistics of one or all virtual DMA channel(s).
638  *
639  * @param dev_id
640  *   The identifier of the device.
641  * @param vchan
642  *   The identifier of virtual DMA channel.
643  *   If equal RTE_DMA_ALL_VCHAN means all channels.
644  * @param[out] stats
645  *   The basic statistics structure encapsulated into rte_dma_stats
646  *   object.
647  *
648  * @return
649  *   0 on success. Otherwise negative value is returned.
650  */
651 __rte_experimental
652 int rte_dma_stats_get(int16_t dev_id, uint16_t vchan,
653 		      struct rte_dma_stats *stats);
654 
655 /**
656  * @warning
657  * @b EXPERIMENTAL: this API may change without prior notice.
658  *
 * Reset basic statistics of one or all virtual DMA channel(s).
660  *
661  * @param dev_id
662  *   The identifier of the device.
663  * @param vchan
664  *   The identifier of virtual DMA channel.
665  *   If equal RTE_DMA_ALL_VCHAN means all channels.
666  *
667  * @return
668  *   0 on success. Otherwise negative value is returned.
669  */
670 __rte_experimental
671 int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
672 
/**
 * Device vchannel status.
 *
 * Enum with the options for the channel status: either idle, active, or
 * halted due to an error.
 * @see rte_dma_vchan_status
 */
enum rte_dma_vchan_status {
	RTE_DMA_VCHAN_IDLE,          /**< Not processing, awaiting ops. */
	RTE_DMA_VCHAN_ACTIVE,        /**< Currently processing jobs. */
	RTE_DMA_VCHAN_HALTED_ERROR,  /**< Not processing due to error, cannot accept new ops. */
};
684 
685 /**
686  * @warning
687  * @b EXPERIMENTAL: this API may change without prior notice.
688  *
689  * Determine if all jobs have completed on a device channel.
690  * This function is primarily designed for testing use, as it allows a process to check if
691  * all jobs are completed, without actually gathering completions from those jobs.
692  *
693  * @param dev_id
694  *   The identifier of the device.
695  * @param vchan
696  *   The identifier of virtual DMA channel.
697  * @param[out] status
698  *   The vchan status
699  * @return
700  *   0 - call completed successfully
701  *   < 0 - error code indicating there was a problem calling the API
702  */
703 __rte_experimental
704 int
705 rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status);
706 
707 /**
708  * @warning
709  * @b EXPERIMENTAL: this API may change without prior notice.
710  *
711  * Dump DMA device info.
712  *
713  * @param dev_id
714  *   The identifier of the device.
715  * @param f
716  *   The file to write the output to.
717  *
718  * @return
719  *   0 on success. Otherwise negative value is returned.
720  */
721 __rte_experimental
722 int rte_dma_dump(int16_t dev_id, FILE *f);
723 
/**
 * DMA transfer result status code defines.
 *
 * @see rte_dma_completed_status
 */
enum rte_dma_status_code {
	/** The operation completed successfully. */
	RTE_DMA_STATUS_SUCCESSFUL,
	/** The operation failed to complete due to an abort by the user.
	 * This is mainly used when processing dev_stop: the user could modify
	 * the descriptors (e.g. change one bit to tell hardware to abort this
	 * job), which allows outstanding requests to be completed as much as
	 * possible, so reducing the time needed to stop the device.
	 */
	RTE_DMA_STATUS_USER_ABORT,
	/** The operation failed to complete due to the following scenario:
	 * the jobs in a particular batch are not attempted because they
	 * appeared after a fence where a previous job failed. In some HW
	 * implementations it is possible for jobs from later batches to be
	 * completed, though, so report the status from the not attempted jobs
	 * before reporting those newer completed jobs.
	 */
	RTE_DMA_STATUS_NOT_ATTEMPTED,
	/** The operation failed to complete due to an invalid source
	 * address.
	 */
	RTE_DMA_STATUS_INVALID_SRC_ADDR,
	/** The operation failed to complete due to an invalid destination
	 * address.
	 */
	RTE_DMA_STATUS_INVALID_DST_ADDR,
	/** The operation failed to complete due to an invalid source or
	 * destination address; covers the case where only an address error is
	 * known, but not which address is in error.
	 */
	RTE_DMA_STATUS_INVALID_ADDR,
	/** The operation failed to complete due to an invalid length. */
	RTE_DMA_STATUS_INVALID_LENGTH,
	/** The operation failed to complete due to an invalid opcode.
	 * The DMA descriptor could have multiple formats, which are
	 * distinguished by the opcode field.
	 */
	RTE_DMA_STATUS_INVALID_OPCODE,
	/** The operation failed to complete due to a bus read error. */
	RTE_DMA_STATUS_BUS_READ_ERROR,
	/** The operation failed to complete due to a bus write error. */
	RTE_DMA_STATUS_BUS_WRITE_ERROR,
	/** The operation failed to complete due to a bus error; covers the
	 * case where only a bus error is known, but not which direction is in
	 * error.
	 */
	RTE_DMA_STATUS_BUS_ERROR,
	/** The operation failed to complete due to data poisoning. */
	RTE_DMA_STATUS_DATA_POISION,
	/** The operation failed to complete due to a descriptor read error. */
	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
	/** The operation failed to complete due to a device link error.
	 * Used to indicate a link error in the memory-to-device/
	 * device-to-memory/device-to-device transfer scenario.
	 */
	RTE_DMA_STATUS_DEV_LINK_ERROR,
	/** The operation failed to complete due to a lookup page fault. */
	RTE_DMA_STATUS_PAGE_FAULT,
	/** The operation failed to complete due to an unknown reason.
	 * The initial value is 256, which reserves space for future errors.
	 */
	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
};
787 
/**
 * A structure used to hold a scatter-gather DMA operation request entry.
 *
 * @see rte_dma_copy_sg
 */
struct rte_dma_sge {
	rte_iova_t addr; /**< The DMA operation address. */
	uint32_t length; /**< The DMA operation length. */
};
797 
798 #include "rte_dmadev_core.h"
799 
800 /**@{@name DMA operation flag
801  * @see rte_dma_copy()
802  * @see rte_dma_copy_sg()
803  * @see rte_dma_fill()
804  */
805 /** Fence flag.
806  * It means the operation with this flag must be processed only after all
807  * previous operations are completed.
808  * If the specify DMA HW works in-order (it means it has default fence between
809  * operations), this flag could be NOP.
810  */
811 #define RTE_DMA_OP_FLAG_FENCE   RTE_BIT64(0)
812 /** Submit flag.
813  * It means the operation with this flag must issue doorbell to hardware after
814  * enqueued jobs.
815  */
816 #define RTE_DMA_OP_FLAG_SUBMIT  RTE_BIT64(1)
817 /** Write data to low level cache hint.
818  * Used for performance optimization, this is just a hint, and there is no
819  * capability bit for this, driver should not return error if this flag was set.
820  */
821 #define RTE_DMA_OP_FLAG_LLC     RTE_BIT64(2)
822 /**@}*/
823 
824 /**
825  * @warning
826  * @b EXPERIMENTAL: this API may change without prior notice.
827  *
828  * Enqueue a copy operation onto the virtual DMA channel.
829  *
830  * This queues up a copy operation to be performed by hardware, if the 'flags'
831  * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin
832  * this operation, otherwise do not trigger doorbell.
833  *
834  * @param dev_id
835  *   The identifier of the device.
836  * @param vchan
837  *   The identifier of virtual DMA channel.
838  * @param src
839  *   The address of the source buffer.
840  * @param dst
841  *   The address of the destination buffer.
842  * @param length
843  *   The length of the data to be copied.
844  * @param flags
845  *   An flags for this operation.
846  *   @see RTE_DMA_OP_FLAG_*
847  *
848  * @return
849  *   - 0..UINT16_MAX: index of enqueued job.
850  *   - -ENOSPC: if no space left to enqueue.
851  *   - other values < 0 on failure.
852  */
853 __rte_experimental
854 static inline int
855 rte_dma_copy(int16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
856 	     uint32_t length, uint64_t flags)
857 {
858 	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
859 
860 #ifdef RTE_DMADEV_DEBUG
861 	if (!rte_dma_is_valid(dev_id) || length == 0)
862 		return -EINVAL;
863 	RTE_FUNC_PTR_OR_ERR_RET(*obj->copy, -ENOTSUP);
864 #endif
865 
866 	return (*obj->copy)(obj->dev_private, vchan, src, dst, length, flags);
867 }
868 
869 /**
870  * @warning
871  * @b EXPERIMENTAL: this API may change without prior notice.
872  *
873  * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
874  *
875  * This queues up a scatter-gather list copy operation to be performed by
876  * hardware, if the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT then
877  * trigger doorbell to begin this operation, otherwise do not trigger doorbell.
878  *
879  * @param dev_id
880  *   The identifier of the device.
881  * @param vchan
882  *   The identifier of virtual DMA channel.
883  * @param src
884  *   The pointer of source scatter-gather entry array.
885  * @param dst
886  *   The pointer of destination scatter-gather entry array.
887  * @param nb_src
888  *   The number of source scatter-gather entry.
889  *   @see struct rte_dma_info::max_sges
890  * @param nb_dst
891  *   The number of destination scatter-gather entry.
892  *   @see struct rte_dma_info::max_sges
893  * @param flags
894  *   An flags for this operation.
895  *   @see RTE_DMA_OP_FLAG_*
896  *
897  * @return
898  *   - 0..UINT16_MAX: index of enqueued job.
899  *   - -ENOSPC: if no space left to enqueue.
900  *   - other values < 0 on failure.
901  */
902 __rte_experimental
903 static inline int
904 rte_dma_copy_sg(int16_t dev_id, uint16_t vchan, struct rte_dma_sge *src,
905 		struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst,
906 		uint64_t flags)
907 {
908 	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
909 
910 #ifdef RTE_DMADEV_DEBUG
911 	if (!rte_dma_is_valid(dev_id) || src == NULL || dst == NULL ||
912 	    nb_src == 0 || nb_dst == 0)
913 		return -EINVAL;
914 	RTE_FUNC_PTR_OR_ERR_RET(*obj->copy_sg, -ENOTSUP);
915 #endif
916 
917 	return (*obj->copy_sg)(obj->dev_private, vchan, src, dst, nb_src,
918 			       nb_dst, flags);
919 }
920 
921 /**
922  * @warning
923  * @b EXPERIMENTAL: this API may change without prior notice.
924  *
925  * Enqueue a fill operation onto the virtual DMA channel.
926  *
927  * This queues up a fill operation to be performed by hardware, if the 'flags'
928  * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin
929  * this operation, otherwise do not trigger doorbell.
930  *
931  * @param dev_id
932  *   The identifier of the device.
933  * @param vchan
934  *   The identifier of virtual DMA channel.
935  * @param pattern
936  *   The pattern to populate the destination buffer with.
937  * @param dst
938  *   The address of the destination buffer.
939  * @param length
940  *   The length of the destination buffer.
941  * @param flags
942  *   An flags for this operation.
943  *   @see RTE_DMA_OP_FLAG_*
944  *
945  * @return
946  *   - 0..UINT16_MAX: index of enqueued job.
947  *   - -ENOSPC: if no space left to enqueue.
948  *   - other values < 0 on failure.
949  */
950 __rte_experimental
951 static inline int
952 rte_dma_fill(int16_t dev_id, uint16_t vchan, uint64_t pattern,
953 	     rte_iova_t dst, uint32_t length, uint64_t flags)
954 {
955 	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
956 
957 #ifdef RTE_DMADEV_DEBUG
958 	if (!rte_dma_is_valid(dev_id) || length == 0)
959 		return -EINVAL;
960 	RTE_FUNC_PTR_OR_ERR_RET(*obj->fill, -ENOTSUP);
961 #endif
962 
963 	return (*obj->fill)(obj->dev_private, vchan, pattern, dst, length,
964 			    flags);
965 }
966 
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Trigger hardware to begin performing enqueued operations.
 *
 * This API is used to write the "doorbell" to the hardware to trigger it
 * to begin the operations previously enqueued by rte_dma_copy/copy_sg/fill()
 * which were not flagged with RTE_DMA_OP_FLAG_SUBMIT.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of virtual DMA channel.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
__rte_experimental
static inline int
rte_dma_submit(int16_t dev_id, uint16_t vchan)
{
	/* Per-device fast-path object, indexed directly by device id. */
	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];

#ifdef RTE_DMADEV_DEBUG
	/* Sanity checks are compiled in only for debug builds to keep the
	 * fast path lean.
	 */
	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;
	RTE_FUNC_PTR_OR_ERR_RET(*obj->submit, -ENOTSUP);
#endif

	return (*obj->submit)(obj->dev_private, vchan);
}
998 
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Return the number of operations that have been successfully completed.
 * Once an operation has been reported as completed, the results of that
 * operation will be visible to all cores on the system.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of virtual DMA channel.
 * @param nb_cpls
 *   The maximum number of completed operations that can be processed.
 * @param[out] last_idx
 *   The last completed operation's ring_idx.
 *   If not required, NULL can be passed in.
 * @param[out] has_error
 *   Indicates whether a transfer error occurred.
 *   If not required, NULL can be passed in.
 *
 * @return
 *   The number of operations that successfully completed. This return value
 *   must be less than or equal to the value of nb_cpls.
 */
__rte_experimental
static inline uint16_t
rte_dma_completed(int16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
		  uint16_t *last_idx, bool *has_error)
{
	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
	/* Scratch storage substituted for any NULL output pointers below. */
	uint16_t idx;
	bool err;

#ifdef RTE_DMADEV_DEBUG
	/* Sanity checks are compiled in only for debug builds to keep the
	 * fast path lean.
	 */
	if (!rte_dma_is_valid(dev_id) || nb_cpls == 0)
		return 0;
	RTE_FUNC_PTR_OR_ERR_RET(*obj->completed, 0);
#endif

	/* Ensure the pointer values are non-null to simplify drivers.
	 * In most cases these should be compile time evaluated, since this is
	 * an inline function.
	 * - If NULL is explicitly passed as parameter, then compiler knows the
	 *   value is NULL
	 * - If address of local variable is passed as parameter, then compiler
	 *   can know it's non-NULL.
	 */
	if (last_idx == NULL)
		last_idx = &idx;
	if (has_error == NULL)
		has_error = &err;

	/* Default to "no error"; the driver callback may overwrite this. */
	*has_error = false;
	return (*obj->completed)(obj->dev_private, vchan, nb_cpls, last_idx,
				 has_error);
}
1056 
1057 /**
1058  * @warning
1059  * @b EXPERIMENTAL: this API may change without prior notice.
1060  *
1061  * Return the number of operations that have been completed, and the operations
1062  * result may succeed or fail.
1063  * Once an operation has been reported as completed successfully, the results of that
1064  * operation will be visible to all cores on the system.
1065  *
1066  * @param dev_id
1067  *   The identifier of the device.
1068  * @param vchan
1069  *   The identifier of virtual DMA channel.
1070  * @param nb_cpls
1071  *   Indicates the size of status array.
1072  * @param[out] last_idx
1073  *   The last completed operation's ring_idx.
1074  *   If not required, NULL can be passed in.
1075  * @param[out] status
1076  *   This is a pointer to an array of length 'nb_cpls' that holds the completion
1077  *   status code of each operation.
1078  *   @see enum rte_dma_status_code
1079  *
1080  * @return
1081  *   The number of operations that completed. This return value must be less
1082  *   than or equal to the value of nb_cpls.
1083  *   If this number is greater than zero (assuming n), then n values in the
1084  *   status array are also set.
1085  */
1086 __rte_experimental
1087 static inline uint16_t
1088 rte_dma_completed_status(int16_t dev_id, uint16_t vchan,
1089 			 const uint16_t nb_cpls, uint16_t *last_idx,
1090 			 enum rte_dma_status_code *status)
1091 {
1092 	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1093 	uint16_t idx;
1094 
1095 #ifdef RTE_DMADEV_DEBUG
1096 	if (!rte_dma_is_valid(dev_id) || nb_cpls == 0 || status == NULL)
1097 		return 0;
1098 	RTE_FUNC_PTR_OR_ERR_RET(*obj->completed_status, 0);
1099 #endif
1100 
1101 	if (last_idx == NULL)
1102 		last_idx = &idx;
1103 
1104 	return (*obj->completed_status)(obj->dev_private, vchan, nb_cpls,
1105 					last_idx, status);
1106 }
1107 
1108 /**
1109  * @warning
1110  * @b EXPERIMENTAL: this API may change without prior notice.
1111  *
1112  * Check remaining capacity in descriptor ring for the current burst.
1113  *
1114  * @param dev_id
1115  *   The identifier of the device.
1116  * @param vchan
1117  *   The identifier of virtual DMA channel.
1118  *
1119  * @return
1120  *   - Remaining space in the descriptor ring for the current burst.
1121  *   - 0 on error
1122  */
1123 __rte_experimental
1124 static inline uint16_t
1125 rte_dma_burst_capacity(int16_t dev_id, uint16_t vchan)
1126 {
1127 	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1128 
1129 #ifdef RTE_DMADEV_DEBUG
1130 	if (!rte_dma_is_valid(dev_id))
1131 		return 0;
1132 	RTE_FUNC_PTR_OR_ERR_RET(*obj->burst_capacity, 0);
1133 #endif
1134 	return (*obj->burst_capacity)(obj->dev_private, vchan);
1135 }
1136 
1137 #ifdef __cplusplus
1138 }
1139 #endif
1140 
1141 #endif /* RTE_DMADEV_H */
1142