xref: /dpdk/lib/vhost/rte_vhost_async.h (revision e5fb1a9698e7111473ca0980fdf6c0edb7acdf91)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4 
5 #ifndef _RTE_VHOST_ASYNC_H_
6 #define _RTE_VHOST_ASYNC_H_
7 
8 #include "rte_vhost.h"
9 
/**
 * iovec iterator describing one scatter-gather payload.
 */
struct rte_vhost_iov_iter {
	/** offset, in bytes, to the first byte of interesting data */
	size_t offset;
	/** total bytes of data covered by this iterator */
	size_t count;
	/** pointer to the iovec array holding the segments */
	struct iovec *iov;
	/** number of valid iovec entries in the array */
	unsigned long nr_segs;
};
23 
/**
 * DMA transfer descriptor pair: one source/destination iterator
 * pair describes a single copy job handed to the async engine.
 */
struct rte_vhost_async_desc {
	/** source memory iov_iter */
	struct rte_vhost_iov_iter *src;
	/** destination memory iov_iter */
	struct rte_vhost_iov_iter *dst;
};
33 
/**
 * DMA transfer status: per-descriptor opaque data exchanged between
 * the vhost library and the application's DMA callbacks. Entries in
 * the two arrays are paired by index.
 */
struct rte_vhost_async_status {
	/** An array of application specific data for source memory */
	uintptr_t *src_opaque_data;
	/** An array of application specific data for destination memory */
	uintptr_t *dst_opaque_data;
};
43 
/**
 * DMA operation callbacks to be implemented by applications.
 *
 * The vhost library invokes these callbacks from its enqueue path;
 * the application bridges them to its DMA engine of choice.
 */
struct rte_vhost_async_channel_ops {
	/**
	 * Instruct async engines to perform copies for a batch of packets.
	 *
	 * @param vid
	 *  id of vhost device to perform data copies
	 * @param queue_id
	 *  queue id to perform data copies
	 * @param descs
	 *  an array of DMA transfer memory descriptors
	 * @param opaque_data
	 *  opaque data pair sending to DMA engine
	 * @param count
	 *  number of elements in the "descs" array
	 * @return
	 *  number of descs processed; a value smaller than count means
	 *  the remaining descriptors were not accepted by the engine
	 */
	uint32_t (*transfer_data)(int vid, uint16_t queue_id,
		struct rte_vhost_async_desc *descs,
		struct rte_vhost_async_status *opaque_data,
		uint16_t count);
	/**
	 * Check copy-completed packets from the async engine.
	 *
	 * @param vid
	 *  id of vhost device to check copy completion
	 * @param queue_id
	 *  queue id to check copy completion
	 * @param opaque_data
	 *  buffer to receive the opaque data pair from DMA engine
	 * @param max_packets
	 *  max number of packets could be completed
	 * @return
	 *  number of async descs completed
	 */
	uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
		struct rte_vhost_async_status *opaque_data,
		uint16_t max_packets);
};
85 
/**
 * Inflight async packet information: tracks an enqueued mbuf whose DMA
 * copy has been submitted but not yet confirmed complete.
 *
 * NOTE(review): the name lacks the rte_ prefix used by the other public
 * structs in this header; renaming would break the API, so left as-is.
 */
struct async_inflight_info {
	struct rte_mbuf *mbuf;  /** the packet buffer being transferred */
	uint16_t descs; /* num of descs inflight */
};
93 
/**
 * DMA channel feature bit definition.
 *
 * Overlays the packed 32-bit feature word (intval) passed to
 * rte_vhost_async_channel_register() with named bit-fields.
 * NOTE(review): the b0..b31 mapping below assumes LSB-first bit-field
 * allocation, which is implementation-defined in C — verify on the
 * target ABI.
 */
struct rte_vhost_async_features {
	union {
		/** all feature bits viewed as one 32-bit value */
		uint32_t intval;
		struct {
			/** b0: DMA engine supports in-order data transfer */
			uint32_t async_inorder:1;
			/** b1 - b15: reserved */
			uint32_t resvd_0:15;
			/** b16 - b27: packet length threshold for DMA transfer */
			uint32_t async_threshold:12;
			/** b28 - b31: reserved */
			uint32_t resvd_1:4;
		};
	};
};
108 
/**
 * Register an async channel for a vhost queue.
 *
 * @param vid
 *  vhost device id async channel to be attached to
 * @param queue_id
 *  vhost queue id async channel to be attached to
 * @param features
 *  DMA channel feature bits (see struct rte_vhost_async_features)
 *    b0       : DMA supports inorder data transfer
 *    b1  - b15: reserved
 *    b16 - b27: packet length threshold for DMA transfer
 *    b28 - b31: reserved
 * @param ops
 *  DMA operation callbacks; must implement both transfer_data and
 *  check_completed_copies
 * @return
 *  0 on success, -1 on failures
 */
__rte_experimental
int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
	uint32_t features, struct rte_vhost_async_channel_ops *ops);
130 
/**
 * Unregister a DMA channel for a vhost queue.
 *
 * @param vid
 *  vhost device id DMA channel to be detached from
 * @param queue_id
 *  vhost queue id DMA channel to be detached from
 * @return
 *  0 on success, -1 on failures
 */
__rte_experimental
int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id);
143 
/**
 * This function submits enqueue data to the async engine. Successfully
 * enqueued packets can be transfer completed or being occupied by DMA
 * engines when this API returns. Transfer completed packets are returned
 * in comp_pkts, so users need to guarantee its size is greater than or
 * equal to the size of pkts; for packets that are successfully enqueued
 * but not transfer completed, users should poll transfer status by
 * rte_vhost_poll_enqueue_completed().
 *
 * In-flight packets remain owned by the library/DMA engine until they
 * are reported complete; do not free or reuse them before then.
 *
 * @param vid
 *  id of vhost device to enqueue data
 * @param queue_id
 *  queue id to enqueue data
 * @param pkts
 *  array of packets to be enqueued
 * @param count
 *  number of packets to be enqueued
 * @param comp_pkts
 *  empty array to receive transfer-completed packets. Users need to
 *  guarantee its size is greater than or equal to that of pkts
 * @param comp_count
 *  number of packets that are transfer completed when this API returns.
 *  If no packets are transfer completed, its value is set to 0.
 * @return
 *  number of packets enqueued, including in-flight and transfer completed
 */
__rte_experimental
uint16_t rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t count,
		struct rte_mbuf **comp_pkts, uint32_t *comp_count);
174 
/**
 * This function checks async completion status for a specific vhost
 * device queue. Packets which finish their copy (enqueue) operation
 * are returned in an array.
 *
 * @param vid
 *  id of vhost device to check copy completion
 * @param queue_id
 *  queue id to check copy completion
 * @param pkts
 *  blank array to receive pointers to the completed packets
 * @param count
 *  size of the packet array
 * @return
 *  number of packets returned
 */
__rte_experimental
uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t count);
194 
195 #endif /* _RTE_VHOST_ASYNC_H_ */
196