/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#ifndef _RTE_VHOST_H_
#define _RTE_VHOST_H_

/**
 * @file
 * Interface to vhost-user
 */

#include <stdbool.h>
#include <stdint.h>
#include <sys/eventfd.h>

#include <rte_memory.h>
#include <rte_mempool.h>

#ifdef __cplusplus
extern "C" {
#endif

/* These are not C++-aware. */
#include <linux/vhost.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_net.h>

#define RTE_VHOST_USER_CLIENT		(1ULL << 0)
#define RTE_VHOST_USER_NO_RECONNECT	(1ULL << 1)
#define RTE_VHOST_USER_RESERVED_1	(1ULL << 2)
#define RTE_VHOST_USER_IOMMU_SUPPORT	(1ULL << 3)
#define RTE_VHOST_USER_POSTCOPY_SUPPORT		(1ULL << 4)
/* support mbuf with external buffer attached */
#define RTE_VHOST_USER_EXTBUF_SUPPORT	(1ULL << 5)
/* support only linear buffers (no chained mbufs) */
#define RTE_VHOST_USER_LINEARBUF_SUPPORT	(1ULL << 6)
#define RTE_VHOST_USER_ASYNC_COPY	(1ULL << 7)
#define RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS	(1ULL << 8)
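
/*
 * Illustrative sketch (not part of the API): combining the flags above
 * when registering a vhost-user socket. The socket path and the error
 * handling are placeholders.
 *
 *	uint64_t flags = RTE_VHOST_USER_CLIENT |
 *			 RTE_VHOST_USER_IOMMU_SUPPORT;
 *
 *	if (rte_vhost_driver_register("/tmp/vhost-user.sock", flags) != 0)
 *		rte_panic("failed to register vhost driver\n");
 */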

/* Features. */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
 #define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif

#ifndef VIRTIO_NET_F_MQ
 #define VIRTIO_NET_F_MQ		22
#endif

#ifndef VIRTIO_NET_F_MTU
 #define VIRTIO_NET_F_MTU 3
#endif

#ifndef VIRTIO_F_ANY_LAYOUT
 #define VIRTIO_F_ANY_LAYOUT		27
#endif

/** Protocol features. */
#ifndef VHOST_USER_PROTOCOL_F_MQ
#define VHOST_USER_PROTOCOL_F_MQ	0
#endif

#ifndef VHOST_USER_PROTOCOL_F_LOG_SHMFD
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD	1
#endif

#ifndef VHOST_USER_PROTOCOL_F_RARP
#define VHOST_USER_PROTOCOL_F_RARP	2
#endif

#ifndef VHOST_USER_PROTOCOL_F_REPLY_ACK
#define VHOST_USER_PROTOCOL_F_REPLY_ACK	3
#endif

#ifndef VHOST_USER_PROTOCOL_F_NET_MTU
#define VHOST_USER_PROTOCOL_F_NET_MTU	4
#endif

#ifndef VHOST_USER_PROTOCOL_F_SLAVE_REQ
#define VHOST_USER_PROTOCOL_F_SLAVE_REQ	5
#endif

#ifndef VHOST_USER_PROTOCOL_F_CRYPTO_SESSION
#define VHOST_USER_PROTOCOL_F_CRYPTO_SESSION 7
#endif

#ifndef VHOST_USER_PROTOCOL_F_PAGEFAULT
#define VHOST_USER_PROTOCOL_F_PAGEFAULT 8
#endif

#ifndef VHOST_USER_PROTOCOL_F_CONFIG
#define VHOST_USER_PROTOCOL_F_CONFIG 9
#endif

#ifndef VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD
#define VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD 10
#endif

#ifndef VHOST_USER_PROTOCOL_F_HOST_NOTIFIER
#define VHOST_USER_PROTOCOL_F_HOST_NOTIFIER 11
#endif

#ifndef VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD
#define VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD 12
#endif

#ifndef VHOST_USER_PROTOCOL_F_STATUS
#define VHOST_USER_PROTOCOL_F_STATUS 16
#endif

/** Indicate whether protocol features negotiation is supported. */
#ifndef VHOST_USER_F_PROTOCOL_FEATURES
#define VHOST_USER_F_PROTOCOL_FEATURES	30
#endif

struct rte_vdpa_device;

/**
 * Information relating to memory regions, including offsets to
 * addresses in QEMU's memory file.
 */
struct rte_vhost_mem_region {
	uint64_t guest_phys_addr;
	uint64_t guest_user_addr;
	uint64_t host_user_addr;
	uint64_t size;
	void	 *mmap_addr;
	uint64_t mmap_size;
	int fd;
};

/**
 * Memory structure includes region and mapping information.
 */
struct rte_vhost_memory {
	uint32_t nregions;
	struct rte_vhost_mem_region regions[];
};

struct rte_vhost_inflight_desc_split {
	uint8_t inflight;
	uint8_t padding[5];
	uint16_t next;
	uint64_t counter;
};

struct rte_vhost_inflight_info_split {
	uint64_t features;
	uint16_t version;
	uint16_t desc_num;
	uint16_t last_inflight_io;
	uint16_t used_idx;
	struct rte_vhost_inflight_desc_split desc[0];
};

struct rte_vhost_inflight_desc_packed {
	uint8_t inflight;
	uint8_t padding;
	uint16_t next;
	uint16_t last;
	uint16_t num;
	uint64_t counter;
	uint16_t id;
	uint16_t flags;
	uint32_t len;
	uint64_t addr;
};

struct rte_vhost_inflight_info_packed {
	uint64_t features;
	uint16_t version;
	uint16_t desc_num;
	uint16_t free_head;
	uint16_t old_free_head;
	uint16_t used_idx;
	uint16_t old_used_idx;
	uint8_t used_wrap_counter;
	uint8_t old_used_wrap_counter;
	uint8_t padding[7];
	struct rte_vhost_inflight_desc_packed desc[0];
};

struct rte_vhost_resubmit_desc {
	uint16_t index;
	uint64_t counter;
};

struct rte_vhost_resubmit_info {
	struct rte_vhost_resubmit_desc *resubmit_list;
	uint16_t resubmit_num;
};

struct rte_vhost_ring_inflight {
	union {
		struct rte_vhost_inflight_info_split *inflight_split;
		struct rte_vhost_inflight_info_packed *inflight_packed;
	};

	struct rte_vhost_resubmit_info *resubmit_inflight;
};

struct rte_vhost_vring {
	union {
		struct vring_desc *desc;
		struct vring_packed_desc *desc_packed;
	};
	union {
		struct vring_avail *avail;
		struct vring_packed_desc_event *driver_event;
	};
	union {
		struct vring_used *used;
		struct vring_packed_desc_event *device_event;
	};
	uint64_t		log_guest_addr;

	/** Deprecated, use rte_vhost_vring_call() instead. */
	int			callfd;

	int			kickfd;
	uint16_t		size;
};

/**
 * Possible results of the vhost user message handling callbacks
 */
enum rte_vhost_msg_result {
	/* Message handling failed */
	RTE_VHOST_MSG_RESULT_ERR = -1,
	/* Message handling successful */
	RTE_VHOST_MSG_RESULT_OK =  0,
	/* Message handling successful and reply prepared */
	RTE_VHOST_MSG_RESULT_REPLY =  1,
	/* Message not handled */
	RTE_VHOST_MSG_RESULT_NOT_HANDLED,
};

/**
 * Function prototype for the vhost backend to handle specific vhost user
 * messages.
 *
 * @param vid
 *  vhost device id
 * @param msg
 *  Message pointer.
 * @return
 *  RTE_VHOST_MSG_RESULT_OK on success,
 *  RTE_VHOST_MSG_RESULT_REPLY on success with reply,
 *  RTE_VHOST_MSG_RESULT_ERR on failure,
 *  RTE_VHOST_MSG_RESULT_NOT_HANDLED if message was not handled.
 */
typedef enum rte_vhost_msg_result (*rte_vhost_msg_handle)(int vid, void *msg);

/**
 * Optional vhost user message handlers.
 */
struct rte_vhost_user_extern_ops {
	/* Called prior to the master message handling. */
	rte_vhost_msg_handle pre_msg_handle;
	/* Called after the master message handling. */
	rte_vhost_msg_handle post_msg_handle;
};

/**
 * Device and vring operations.
 */
struct vhost_device_ops {
	int (*new_device)(int vid);		/**< Add device. */
	void (*destroy_device)(int vid);	/**< Remove device. */

	int (*vring_state_changed)(int vid, uint16_t queue_id, int enable);	/**< triggered when a vring is enabled or disabled */

	/**
	 * Features could be changed after the feature negotiation.
	 * For example, VHOST_F_LOG_ALL will be set/cleared at the
	 * start/end of live migration, respectively. This callback
	 * is used to inform the application on such change.
	 */
	int (*features_changed)(int vid, uint64_t features);

	int (*new_connection)(int vid);
	void (*destroy_connection)(int vid);

	/**
	 * This callback is called each time the guest gets notified
	 * about waiting packets, i.e. on each eventfd_write(callfd),
	 * so it can be used for counting these "slow" syscalls.
	 */
	void (*guest_notified)(int vid);

	void *reserved[1]; /**< Reserved for future extension */
};
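
/*
 * A minimal sketch of wiring up these callbacks; the handler names are
 * hypothetical, and the actual registration entry point is
 * rte_vhost_driver_callback_register(), declared further below.
 *
 *	static int app_new_device(int vid)
 *	{
 *		// e.g. look up the queues and start the datapath for vid
 *		return 0;
 *	}
 *
 *	static void app_destroy_device(int vid)
 *	{
 *		// e.g. stop the datapath and release per-device state
 *	}
 *
 *	static const struct vhost_device_ops app_ops = {
 *		.new_device = app_new_device,
 *		.destroy_device = app_destroy_device,
 *	};
 *
 *	rte_vhost_driver_callback_register("/tmp/vhost-user.sock", &app_ops);
 */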

/**
 * Convert guest physical address to host virtual address
 *
 * This function is deprecated because it is unsafe.
 * The new rte_vhost_va_from_guest_pa() should be used instead to ensure
 * guest physical ranges are fully and contiguously mapped into the
 * process virtual address space.
 *
 * @param mem
 *  the guest memory regions
 * @param gpa
 *  the guest physical address for querying
 * @return
 *  the host virtual address on success, 0 on failure
 */
__rte_deprecated
static __rte_always_inline uint64_t
rte_vhost_gpa_to_vva(struct rte_vhost_memory *mem, uint64_t gpa)
{
	struct rte_vhost_mem_region *reg;
	uint32_t i;

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];
		if (gpa >= reg->guest_phys_addr &&
		    gpa <  reg->guest_phys_addr + reg->size) {
			return gpa - reg->guest_phys_addr +
			       reg->host_user_addr;
		}
	}

	return 0;
}

/**
 * Convert guest physical address to host virtual address safely
 *
 * This variant of rte_vhost_gpa_to_vva() ensures that all of the
 * requested length is mapped and contiguous in the process address
 * space.
 *
 * @param mem
 *  the guest memory regions
 * @param gpa
 *  the guest physical address for querying
 * @param len
 *  the size of the requested area to map, updated with actual size mapped
 * @return
 *  the host virtual address on success, 0 on failure
 */
static __rte_always_inline uint64_t
rte_vhost_va_from_guest_pa(struct rte_vhost_memory *mem,
						   uint64_t gpa, uint64_t *len)
{
	struct rte_vhost_mem_region *r;
	uint32_t i;

	for (i = 0; i < mem->nregions; i++) {
		r = &mem->regions[i];
		if (gpa >= r->guest_phys_addr &&
		    gpa <  r->guest_phys_addr + r->size) {

			if (unlikely(*len > r->guest_phys_addr + r->size - gpa))
				*len = r->guest_phys_addr + r->size - gpa;

			return gpa - r->guest_phys_addr +
			       r->host_user_addr;
		}
	}
	*len = 0;

	return 0;
}
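
/*
 * Usage sketch: translating a guest physical buffer safely. If a region
 * cannot cover the full request, *len is shrunk to the mapped, contiguous
 * part, so a caller may need to loop (the names below are hypothetical).
 *
 *	uint64_t len = buf_size;
 *	uint64_t vva = rte_vhost_va_from_guest_pa(mem, gpa, &len);
 *
 *	if (vva == 0)
 *		return -1;		// gpa is not mapped at all
 *	if (len < buf_size)
 *		copy_in_chunks(...);	// hypothetical: buffer spans regions
 */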

#define RTE_VHOST_NEED_LOG(features)	((features) & (1ULL << VHOST_F_LOG_ALL))

/**
 * Log the memory write start with given address.
 *
 * This function only needs to be invoked when live migration starts.
 * Therefore, it will not be called at all most of the time. To keep the
 * performance impact minimal, it's suggested to do a check before
 * calling it:
 *
 *        if (unlikely(RTE_VHOST_NEED_LOG(features)))
 *                rte_vhost_log_write(vid, addr, len);
 *
 * @param vid
 *  vhost device ID
 * @param addr
 *  the starting address for write (in guest physical address space)
 * @param len
 *  the length to write
 */
void rte_vhost_log_write(int vid, uint64_t addr, uint64_t len);

/**
 * Log the used ring update start at given offset.
 *
 * As with rte_vhost_log_write(), it's suggested to do a check before
 * calling it:
 *
 *        if (unlikely(RTE_VHOST_NEED_LOG(features)))
 *                rte_vhost_log_used_vring(vid, vring_idx, offset, len);
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  the vring index
 * @param offset
 *  the offset inside the used ring
 * @param len
 *  the length to write
 */
void rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
			      uint64_t offset, uint64_t len);

int rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable);

/**
 * Register vhost driver. The path can be different for each instance,
 * to support multiple instances.
 */
int rte_vhost_driver_register(const char *path, uint64_t flags);

/* Unregister vhost driver. This is only meaningful to vhost user. */
int rte_vhost_driver_unregister(const char *path);

/**
 * Set the vdpa device id, enforce single connection per socket
 *
 * @param path
 *  The vhost-user socket file path
 * @param dev
 *  vDPA device pointer
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_driver_attach_vdpa_device(const char *path,
		struct rte_vdpa_device *dev);

/**
 * Unset the vdpa device id
 *
 * @param path
 *  The vhost-user socket file path
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_driver_detach_vdpa_device(const char *path);

/**
 * Get the device id
 *
 * @param path
 *  The vhost-user socket file path
 * @return
 *  vDPA device pointer, NULL on failure
 */
struct rte_vdpa_device *
rte_vhost_driver_get_vdpa_device(const char *path);

/**
 * Set the feature bits the vhost-user driver supports.
 *
 * @param path
 *  The vhost-user socket file path
 * @param features
 *  Supported features
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_set_features(const char *path, uint64_t features);

/**
 * Enable vhost-user driver features.
 *
 * Note that
 * - the param features should be a subset of the feature bits provided
 *   by rte_vhost_driver_set_features().
 * - it must be invoked before vhost-user negotiation starts.
 *
 * @param path
 *  The vhost-user socket file path
 * @param features
 *  Features to enable
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_enable_features(const char *path, uint64_t features);

/**
 * Disable vhost-user driver features.
 *
 * The two notes at rte_vhost_driver_enable_features() also apply here.
 *
 * @param path
 *  The vhost-user socket file path
 * @param features
 *  Features to disable
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_disable_features(const char *path, uint64_t features);

/**
 * Get the feature bits before feature negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @param features
 *  A pointer to store the queried feature bits
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_get_features(const char *path, uint64_t *features);

/**
 * Set the protocol feature bits before feature negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @param protocol_features
 *  Supported protocol features
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_driver_set_protocol_features(const char *path,
		uint64_t protocol_features);

/**
 * Get the protocol feature bits before feature negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @param protocol_features
 *  A pointer to store the queried protocol feature bits
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_driver_get_protocol_features(const char *path,
		uint64_t *protocol_features);

/**
 * Get the queue number bits before feature negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @param queue_num
 *  A pointer to store the queried queue number bits
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num);

/**
 * Get the feature bits after negotiation.
 *
 * @param vid
 *  Vhost device ID
 * @param features
 *  A pointer to store the queried feature bits
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_get_negotiated_features(int vid, uint64_t *features);
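
/*
 * Usage sketch: testing a negotiated feature bit, here
 * VIRTIO_NET_F_MRG_RXBUF from linux/virtio_net.h (any feature bit
 * works the same way):
 *
 *	uint64_t features;
 *
 *	if (rte_vhost_get_negotiated_features(vid, &features) == 0 &&
 *	    (features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)))
 *		;	// the guest merges receive buffers
 */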

/**
 * Get the protocol feature bits after negotiation.
 *
 * @param vid
 *  Vhost device ID
 * @param protocol_features
 *  A pointer to store the queried protocol feature bits
 * @return
 *  0 on success, -1 on failure
 */
__rte_experimental
int
rte_vhost_get_negotiated_protocol_features(int vid,
					   uint64_t *protocol_features);

/* Register callbacks. */
int rte_vhost_driver_callback_register(const char *path,
	struct vhost_device_ops const * const ops);

/**
 * Start the vhost-user driver.
 *
 * This function triggers the vhost-user negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_start(const char *path);
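
/*
 * A typical bring-up sequence, sketched with a placeholder path and the
 * hypothetical app_ops struct from the earlier sketch: register the
 * socket, hook the device callbacks, then start the driver so the
 * vhost-user negotiation can begin.
 *
 *	const char *path = "/tmp/vhost-user.sock";
 *
 *	if (rte_vhost_driver_register(path, 0) != 0 ||
 *	    rte_vhost_driver_callback_register(path, &app_ops) != 0 ||
 *	    rte_vhost_driver_start(path) != 0)
 *		rte_panic("vhost driver setup failed\n");
 */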

/**
 * Get the MTU value of the device if set in QEMU.
 *
 * @param vid
 *  virtio-net device ID
 * @param mtu
 *  The variable to store the MTU value
 *
 * @return
 *  0: success
 *  -EAGAIN: device not yet started
 *  -ENOTSUP: device does not support MTU feature
 */
int rte_vhost_get_mtu(int vid, uint16_t *mtu);

/**
 * Get the numa node from which the virtio net device's memory
 * is allocated.
 *
 * @param vid
 *  vhost device ID
 *
 * @return
 *  The numa node, -1 on failure
 */
int rte_vhost_get_numa_node(int vid);

/**
 * @deprecated
 * Get the number of queues the device supports.
 *
 * Note this function is deprecated, as it returns a queue pair number,
 * which is vhost specific. Instead, rte_vhost_get_vring_num should
 * be used.
 *
 * @param vid
 *  vhost device ID
 *
 * @return
 *  The number of queues, 0 on failure
 */
__rte_deprecated
uint32_t rte_vhost_get_queue_num(int vid);

/**
 * Get the number of vrings the device supports.
 *
 * @param vid
 *  vhost device ID
 *
 * @return
 *  The number of vrings, 0 on failure
 */
uint16_t rte_vhost_get_vring_num(int vid);

/**
 * Get the virtio net device's ifname, which is the vhost-user socket
 * file path.
 *
 * @param vid
 *  vhost device ID
 * @param buf
 *  The buffer to store the queried ifname
 * @param len
 *  The length of buf
 *
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_get_ifname(int vid, char *buf, size_t len);

/**
 * Get how many avail entries are left in the queue
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  virtio queue index
 *
 * @return
 *  num of avail entries left
 */
uint16_t rte_vhost_avail_entries(int vid, uint16_t queue_id);

struct rte_mbuf;
struct rte_mempool;
/**
 * This function adds buffers to the virtio device's RX virtqueue. Buffers can
 * be received from the physical port or from another virtual device. A packet
 * count is returned to indicate the number of packets that were successfully
 * added to the RX queue.
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  virtio queue index in mq case
 * @param pkts
 *  array to contain packets to be enqueued
 * @param count
 *  packets num to be enqueued
 * @return
 *  num of packets enqueued
 */
uint16_t rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
	struct rte_mbuf **pkts, uint16_t count);

/**
 * This function gets guest buffers from the virtio device TX virtqueue,
 * constructs host mbufs, copies the guest buffer content to the host mbufs
 * and stores them in pkts to be processed.
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  virtio queue index in mq case
 * @param mbuf_pool
 *  mbuf_pool where host mbuf is allocated.
 * @param pkts
 *  array to contain packets to be dequeued
 * @param count
 *  packets num to be dequeued
 * @return
 *  num of packets dequeued
 */
uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);
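
/*
 * Sketch of a simple echo datapath built on the two calls above: dequeue
 * packets the guest transmitted on virtqueue 1 and enqueue them back on
 * virtqueue 0 (queue indices follow the usual virtio-net RX/TX layout;
 * mbuf_pool is a hypothetical, pre-created mempool). This assumes the
 * synchronous, copying datapath.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t n, i;
 *
 *	n = rte_vhost_dequeue_burst(vid, 1, mbuf_pool, pkts, 32);
 *	rte_vhost_enqueue_burst(vid, 0, pkts, n);
 *	// the enqueue path copies into guest buffers, so free all mbufs
 *	for (i = 0; i < n; i++)
 *		rte_pktmbuf_free(pkts[i]);
 */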

/**
 * Get guest mem table: a list of memory regions.
 *
 * An rte_vhost_memory object will be allocated internally, to hold the
 * guest memory regions. The application should free it in the
 * destroy_device() callback.
 *
 * @param vid
 *  vhost device ID
 * @param mem
 *  To store the returned mem regions
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);
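
/*
 * Usage sketch: fetching and releasing the guest memory map. The table
 * is heap-allocated by the library, so a plain free() is the expected
 * release (inspect_region() below is hypothetical).
 *
 *	struct rte_vhost_memory *mem;
 *	uint32_t i;
 *
 *	if (rte_vhost_get_mem_table(vid, &mem) != 0)
 *		return -1;
 *	for (i = 0; i < mem->nregions; i++)
 *		inspect_region(&mem->regions[i]);
 *	...
 *	free(mem);	// later, from the destroy_device() callback
 */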

/**
 * Get guest vring info, including the vring address, vring size, etc.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param vring
 *  the structure to hold the requested vring info
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
			      struct rte_vhost_vring *vring);

/**
 * Get guest inflight vring info, including inflight ring and resubmit list.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param vring
 *  the structure to hold the requested inflight vring info
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_get_vhost_ring_inflight(int vid, uint16_t vring_idx,
	struct rte_vhost_ring_inflight *vring);

/**
 * Set split inflight descriptor.
 *
 * This function saves descriptors that have been consumed from the
 * available ring.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param idx
 *  inflight entry index
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
	uint16_t idx);

/**
 * Set packed inflight descriptor and get corresponding inflight entry
 *
 * This function saves descriptors that have been consumed.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param head
 *  head of descriptors
 * @param last
 *  last of descriptors
 * @param inflight_entry
 *  corresponding inflight entry
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
	uint16_t head, uint16_t last, uint16_t *inflight_entry);

/**
 * Save the descriptor head of the last batch of used descriptors.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param idx
 *  descriptor entry index
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_set_last_inflight_io_split(int vid,
	uint16_t vring_idx, uint16_t idx);

/**
 * Update the inflight free_head, used_idx and used_wrap_counter.
 *
 * This function updates the inflight status before the descriptors
 * are marked as used.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param head
 *  head of descriptors
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_set_last_inflight_io_packed(int vid,
	uint16_t vring_idx, uint16_t head);

/**
 * Clear the split inflight status.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param last_used_idx
 *  last used idx of used ring
 * @param idx
 *  inflight entry index
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
	uint16_t last_used_idx, uint16_t idx);
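
/*
 * A heavily simplified sketch of the split-ring inflight flow for one
 * descriptor; the ordering is illustrative, see the per-function
 * comments above for the exact guarantees.
 *
 *	// descriptor `idx` fetched from the avail ring: mark it inflight
 *	rte_vhost_set_inflight_desc_split(vid, vring_idx, idx);
 *
 *	// ... process the request and fill the used ring entry ...
 *
 *	rte_vhost_set_last_inflight_io_split(vid, vring_idx, idx);
 *	// once the used index update is visible, retire the entry
 *	rte_vhost_clr_inflight_desc_split(vid, vring_idx, last_used_idx, idx);
 */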

/**
 * Clear the packed inflight status.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param head
 *  inflight entry index
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
	uint16_t head);

/**
 * Notify the guest that used descriptors have been added to the vring. This
 * function acts as a memory barrier.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_vring_call(int vid, uint16_t vring_idx);

/**
 * Get vhost RX queue avail count.
 *
 * @param vid
 *  vhost device ID
 * @param qid
 *  virtio queue index in mq case
 * @return
 *  num of desc available
 */
uint32_t rte_vhost_rx_queue_count(int vid, uint16_t qid);

/**
 * Get log base and log size of the vhost device
 *
 * @param vid
 *  vhost device ID
 * @param log_base
 *  vhost log base
 * @param log_size
 *  vhost log size
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_get_log_base(int vid, uint64_t *log_base, uint64_t *log_size);

/**
 * Get last_avail/used_idx of the vhost virtqueue
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  vhost queue index
 * @param last_avail_idx
 *  vhost last_avail_idx to get
 * @param last_used_idx
 *  vhost last_used_idx to get
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_get_vring_base(int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx);

/**
 * Get last_avail/last_used of the vhost virtqueue
 *
 * This function is designed for reconnection, and is specific to the
 * packed ring, as the two parameters can be recovered from the
 * inflight queue region.
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  vhost queue index
 * @param last_avail_idx
 *  vhost last_avail_idx to get
 * @param last_used_idx
 *  vhost last_used_idx to get
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_get_vring_base_from_inflight(int vid,
	uint16_t queue_id, uint16_t *last_avail_idx, uint16_t *last_used_idx);

/**
 * Set last_avail/used_idx of the vhost virtqueue
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  vhost queue index
 * @param last_avail_idx
 *  last_avail_idx to set
 * @param last_used_idx
 *  last_used_idx to set
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_set_vring_base(int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx);
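
/*
 * Usage sketch: saving and restoring the ring indexes around a stop and
 * restart of ring processing (for example when switching between a vDPA
 * device and a software datapath):
 *
 *	uint16_t last_avail_idx, last_used_idx;
 *
 *	rte_vhost_get_vring_base(vid, queue_id,
 *			&last_avail_idx, &last_used_idx);
 *	// ... tear down, then later resume processing ...
 *	rte_vhost_set_vring_base(vid, queue_id,
 *			last_avail_idx, last_used_idx);
 */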

/**
 * Register external message handling callbacks
 *
 * @param vid
 *  vhost device ID
 * @param ops
 *  virtio external callbacks to register
 * @param ctx
 *  additional context passed to the callbacks
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_extern_callback_register(int vid,
		struct rte_vhost_user_extern_ops const * const ops, void *ctx);
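
/*
 * Sketch of an external message hook, assuming a hypothetical handler
 * that lets every message fall through to the default processing:
 *
 *	static enum rte_vhost_msg_result
 *	app_pre_msg(int vid, void *msg)
 *	{
 *		// inspect msg here; claim it by returning _OK/_REPLY/_ERR
 *		return RTE_VHOST_MSG_RESULT_NOT_HANDLED;
 *	}
 *
 *	static const struct rte_vhost_user_extern_ops app_extern_ops = {
 *		.pre_msg_handle = app_pre_msg,
 *	};
 *
 *	rte_vhost_extern_callback_register(vid, &app_extern_ops, NULL);
 */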

/**
 * Get vdpa device id for vhost device.
 *
 * @param vid
 *  vhost device id
 * @return
 *  vDPA device pointer on success, NULL on failure
 */
struct rte_vdpa_device *
rte_vhost_get_vdpa_device(int vid);

/**
 * Notify the guest that it should get the virtio configuration space
 * from the backend.
 *
 * @param vid
 *  vhost device ID
 * @param need_reply
 *  wait for the master to respond with the status of this operation
 * @return
 *  0 on success, < 0 on failure
 */
__rte_experimental
int
rte_vhost_slave_config_change(int vid, bool need_reply);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_VHOST_H_ */