xref: /spdk/lib/vhost/vhost_internal.h (revision 2e1d23f4b70ea8940db7624b3bb974a4a8658ec7)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #ifndef SPDK_VHOST_INTERNAL_H
7 #define SPDK_VHOST_INTERNAL_H
8 #include <linux/virtio_config.h>
9 
10 #include "spdk/stdinc.h"
11 
12 #include <rte_vhost.h>
13 
14 #include "spdk_internal/vhost_user.h"
15 #include "spdk/bdev.h"
16 #include "spdk/log.h"
17 #include "spdk/util.h"
18 #include "spdk/rpc.h"
19 #include "spdk/config.h"
20 #include "spdk/tree.h"
21 
22 #define SPDK_VHOST_MAX_VQUEUES	256
23 #define SPDK_VHOST_MAX_VQ_SIZE	1024
24 
25 #define SPDK_VHOST_SCSI_CTRLR_MAX_DEVS 8
26 
27 #define SPDK_VHOST_IOVS_MAX 129
28 
29 #define SPDK_VHOST_VQ_MAX_SUBMISSIONS	32
30 
31 /*
32  * Rate at which stats are checked for interrupt coalescing.
33  */
34 #define SPDK_VHOST_STATS_CHECK_INTERVAL_MS 10
35 /*
36  * Default threshold at which interrupts start to be coalesced.
37  */
38 #define SPDK_VHOST_VQ_IOPS_COALESCING_THRESHOLD 60000
39 
40 /*
41  * Timeout in seconds for vhost-user session stop message.
42  */
43 #define SPDK_VHOST_SESSION_STOP_TIMEOUT_IN_SEC 3
44 /*
45  * Stop retry timeout in seconds, this value should be greater than SPDK_VHOST_SESSION_STOP_TIMEOUT_IN_SEC.
46  */
47 #define SPDK_VHOST_SESSION_STOP_RETRY_TIMEOUT_IN_SEC (SPDK_VHOST_SESSION_STOP_TIMEOUT_IN_SEC + 1)
48 /*
49  * Stop retry period in microseconds
50  */
51 #define SPDK_VHOST_SESSION_STOP_RETRY_PERIOD_IN_US 1000
52 
53 /*
54  * Currently coalescing is not used by default.
55  * Setting this to value > 0 here or by RPC will enable coalescing.
56  */
57 #define SPDK_VHOST_COALESCING_DELAY_BASE_US 0
58 
59 #define SPDK_VHOST_FEATURES ((1ULL << VHOST_F_LOG_ALL) | \
60 	(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
61 	(1ULL << VIRTIO_F_VERSION_1) | \
62 	(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \
63 	(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
64 	(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
65 	(1ULL << VIRTIO_F_ANY_LAYOUT))
66 
67 #define SPDK_VHOST_DISABLED_FEATURES ((1ULL << VIRTIO_RING_F_EVENT_IDX) | \
68 	(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY))
69 
70 #define VRING_DESC_F_AVAIL	(1ULL << VRING_PACKED_DESC_F_AVAIL)
71 #define VRING_DESC_F_USED	(1ULL << VRING_PACKED_DESC_F_USED)
72 #define VRING_DESC_F_AVAIL_USED	(VRING_DESC_F_AVAIL | VRING_DESC_F_USED)
73 
74 typedef struct rte_vhost_resubmit_desc spdk_vhost_resubmit_desc;
75 typedef struct rte_vhost_resubmit_info spdk_vhost_resubmit_info;
76 typedef struct rte_vhost_inflight_desc_packed	spdk_vhost_inflight_desc;
77 
78 struct spdk_vhost_virtqueue {
79 	struct rte_vhost_vring vring;
80 	struct rte_vhost_ring_inflight vring_inflight;
81 	uint16_t last_avail_idx;
82 	uint16_t last_used_idx;
83 
84 	struct {
85 		/* To mark a descriptor as available in packed ring
86 		 * Equal to avail_wrap_counter in spec.
87 		 */
88 		uint8_t avail_phase	: 1;
89 		/* To mark a descriptor as used in packed ring
90 		 * Equal to used_wrap_counter in spec.
91 		 */
92 		uint8_t used_phase	: 1;
93 		uint8_t padding		: 5;
94 		bool packed_ring	: 1;
95 	} packed;
96 
97 	void *tasks;
98 
99 	/* Request count from last stats check */
100 	uint32_t req_cnt;
101 
102 	/* Request count from last event */
103 	uint16_t used_req_cnt;
104 
105 	/* How long interrupt is delayed */
106 	uint32_t irq_delay_time;
107 
108 	/* Next time when we need to send event */
109 	uint64_t next_event_time;
110 
111 	/* Associated vhost_virtqueue in the virtio device's virtqueue list */
112 	uint32_t vring_idx;
113 
114 	struct spdk_vhost_session *vsession;
115 
116 	struct spdk_interrupt *intr;
117 } __attribute((aligned(SPDK_CACHE_LINE_SIZE)));
118 
struct spdk_vhost_session {
	/* Device this session belongs to. */
	struct spdk_vhost_dev *vdev;

	/* rte_vhost connection ID. */
	int vid;

	/* Unique session ID. */
	uint64_t id;
	/* Unique session name. */
	char *name;

	/* Session has fully started. */
	bool started;
	/* Session start is currently in progress. */
	bool starting;
	/* Session must be stopped and started again; see 'original_max_queues'. */
	bool needs_restart;

	/* Guest memory table for this connection (see vhost_get_mem_table()). */
	struct rte_vhost_memory *mem;

	/* Outstanding task count. NOTE(review): the exact accounting is done by
	 * the transport-specific code -- confirm there before relying on it. */
	int task_cnt;

	/* Current number of queues in use; compare 'original_max_queues'. */
	uint16_t max_queues;
	/* Maximum number of queues before restart, used with 'needs_restart' flag */
	uint16_t original_max_queues;

	/* Virtio feature bits negotiated with the driver. */
	uint64_t negotiated_features;

	/* Local copy of device coalescing settings. */
	uint32_t coalescing_delay_time_base;
	uint32_t coalescing_io_rate_threshold;

	/* Next time when stats for event coalescing will be checked. */
	uint64_t next_stats_check_time;

	/* Interval used for event coalescing checking. */
	uint64_t stats_check_interval;

	/* Session's stop poller will only try limited times to destroy the session. */
	uint32_t stop_retry_count;

	/**
	 * DPDK calls our callbacks synchronously but the work those callbacks
	 * perform needs to be async. Luckily, all DPDK callbacks are called on
	 * a DPDK-internal pthread and only related to the current session, so we'll
	 * just wait on a semaphore of this session in there.
	 */
	sem_t dpdk_sem;

	/** Return code for the current DPDK callback */
	int dpdk_response;

	/* Per-virtqueue state; sized for the maximum supported queue count. */
	struct spdk_vhost_virtqueue virtqueue[SPDK_VHOST_MAX_VQUEUES];

	/* Link in spdk_vhost_user_dev::vsessions. */
	TAILQ_ENTRY(spdk_vhost_session) tailq;
};
172 
struct spdk_vhost_user_dev {
	/* Generic vhost device this vhost-user state extends. */
	struct spdk_vhost_dev *vdev;

	/* Transport-specific session callbacks (start/stop, task allocation). */
	const struct spdk_vhost_user_dev_backend *user_backend;

	/* Saved original values used to setup coalescing to avoid integer
	 * rounding issues during save/load config.
	 */
	uint32_t coalescing_delay_us;
	uint32_t coalescing_iops_threshold;

	/* Device has been registered with the vhost-user transport. */
	bool registered;

	/* Use this lock to protect multiple sessions. */
	pthread_mutex_t lock;

	/* Current connections to the device */
	TAILQ_HEAD(, spdk_vhost_session) vsessions;

	/* Increment-only session counter */
	uint64_t vsessions_num;

	/* Number of pending asynchronous operations */
	uint32_t pending_async_op_num;
};
198 
struct spdk_vhost_dev {
	/* Controller name. */
	char *name;
	/* Controller path -- presumably the UNIX domain socket path passed to
	 * vhost_register_unix_socket(); confirm at the call site. */
	char *path;

	/* If set, the transport's default cpumask is used for this device --
	 * NOTE(review): name-based assumption, verify in transport code. */
	bool use_default_cpumask;
	/* SPDK thread this device is handled on. */
	struct spdk_thread *thread;

	/* Virtio feature bits offered to the driver. */
	uint64_t virtio_features;
	/* Feature bits explicitly disabled for this device. */
	uint64_t disabled_features;
	/* vhost-user protocol feature bits. */
	uint64_t protocol_features;

	/* Device-type specific callbacks (blk/scsi). */
	const struct spdk_vhost_dev_backend *backend;

	/* Context passed from transport */
	void *ctxt;

	/* Entry in the global red-black tree of vhost devices. */
	RB_ENTRY(spdk_vhost_dev) node;
};
217 
218 static inline struct spdk_vhost_user_dev *
219 to_user_dev(struct spdk_vhost_dev *vdev)
220 {
221 	assert(vdev != NULL);
222 	return vdev->ctxt;
223 }
224 
225 /**
226  * \param vdev vhost device.
227  * \param vsession vhost session.
228  * \param arg user-provided parameter.
229  *
230  * \return negative values will break the foreach call, meaning
231  * the function won't be called again. Return codes zero and
232  * positive don't have any effect.
233  */
234 typedef int (*spdk_vhost_session_fn)(struct spdk_vhost_dev *vdev,
235 				     struct spdk_vhost_session *vsession,
236 				     void *arg);
237 
238 /**
239  * \param vdev vhost device.
240  * \param arg user-provided parameter.
241  */
242 typedef void (*spdk_vhost_dev_fn)(struct spdk_vhost_dev *vdev, void *arg);
243 
struct spdk_vhost_user_dev_backend {
	/**
	 * Size of additional per-session context data
	 * allocated whenever a new client connects.
	 */
	size_t session_ctx_size;

	/* Start serving a session on its thread. */
	spdk_vhost_session_fn start_session;
	/* Stop serving a session. */
	spdk_vhost_session_fn stop_session;
	/* Allocate the per-virtqueue task pool for queue 'qid'
	 * (stored in spdk_vhost_virtqueue::tasks). */
	int (*alloc_vq_tasks)(struct spdk_vhost_session *vsession, uint16_t qid);
	/* Transport hook invoked when a virtqueue is enabled. */
	int (*enable_vq)(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *vq);
};
256 
/* Kind of device a spdk_vhost_dev_backend implements. */
enum vhost_backend_type {
	VHOST_BACKEND_BLK = 0,
	VHOST_BACKEND_SCSI,
};
261 
struct spdk_vhost_dev_backend {
	/* Device type implemented by this backend. */
	enum vhost_backend_type type;

	/* Get/set the virtio device config space. */
	int (*vhost_get_config)(struct spdk_vhost_dev *vdev, uint8_t *config, uint32_t len);
	int (*vhost_set_config)(struct spdk_vhost_dev *vdev, uint8_t *config,
				uint32_t offset, uint32_t size, uint32_t flags);

	/* Dump runtime device info as JSON. */
	void (*dump_info_json)(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
	/* Write the JSON config needed to recreate the device. */
	void (*write_config_json)(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
	/* Remove the device. */
	int (*remove_device)(struct spdk_vhost_dev *vdev);
	/* Set/get interrupt coalescing parameters. */
	int (*set_coalescing)(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
			      uint32_t iops_threshold);
	void (*get_coalescing)(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
			       uint32_t *iops_threshold);
};
277 
278 void *vhost_gpa_to_vva(struct spdk_vhost_session *vsession, uint64_t addr, uint64_t len);
279 
280 uint16_t vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *vq, uint16_t *reqs,
281 				 uint16_t reqs_len);
282 
283 /**
284  * Get a virtio split descriptor at given index in given virtqueue.
285  * The descriptor will provide access to the entire descriptor
286  * chain. The subsequent descriptors are accessible via
287  * \c spdk_vhost_vring_desc_get_next.
288  * \param vsession vhost session
289  * \param vq virtqueue
290  * \param req_idx descriptor index
291  * \param desc pointer to be set to the descriptor
292  * \param desc_table descriptor table to be used with
293  * \c spdk_vhost_vring_desc_get_next. This might be either
294  * default virtqueue descriptor table or per-chain indirect
295  * table.
296  * \param desc_table_size size of the *desc_table*
297  * \return 0 on success, -1 if given index is invalid.
298  * If -1 is returned, the content of params is undefined.
299  */
300 int vhost_vq_get_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *vq,
301 		      uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table,
302 		      uint32_t *desc_table_size);
303 
304 /**
305  * Get a virtio packed descriptor at given index in given virtqueue.
306  * The descriptor will provide access to the entire descriptor
307  * chain. The subsequent descriptors are accessible via
308  * \c vhost_vring_packed_desc_get_next.
309  * \param vsession vhost session
310  * \param vq virtqueue
311  * \param req_idx descriptor index
312  * \param desc pointer to be set to the descriptor
313  * \param desc_table descriptor table to be used with
314  * \c spdk_vhost_vring_desc_get_next. This might be either
315  * \c NULL or per-chain indirect table.
316  * \param desc_table_size size of the *desc_table*
317  * \return 0 on success, -1 if given index is invalid.
318  * If -1 is returned, the content of params is undefined.
319  */
320 int vhost_vq_get_desc_packed(struct spdk_vhost_session *vsession,
321 			     struct spdk_vhost_virtqueue *virtqueue,
322 			     uint16_t req_idx, struct vring_packed_desc **desc,
323 			     struct vring_packed_desc **desc_table, uint32_t *desc_table_size);
324 
325 int vhost_inflight_queue_get_desc(struct spdk_vhost_session *vsession,
326 				  spdk_vhost_inflight_desc *desc_array,
327 				  uint16_t req_idx, spdk_vhost_inflight_desc **desc,
328 				  struct vring_packed_desc  **desc_table, uint32_t *desc_table_size);
329 
330 /**
331  * Send IRQ/call client (if pending) for \c vq.
332  * \param vsession vhost session
333  * \param vq virtqueue
334  * \return
335  *   0 - if no interrupt was signalled
336  *   1 - if interrupt was signalled
337  */
338 int vhost_vq_used_signal(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *vq);
339 
340 /**
341  * Send IRQs for the queue that need to be signaled.
342  * \param vq virtqueue
343  */
344 void vhost_session_vq_used_signal(struct spdk_vhost_virtqueue *virtqueue);
345 
346 void vhost_vq_used_ring_enqueue(struct spdk_vhost_session *vsession,
347 				struct spdk_vhost_virtqueue *vq,
348 				uint16_t id, uint32_t len);
349 
350 /**
351  * Enqueue the entry to the used ring when device complete the request.
352  * \param vsession vhost session
353  * \param vq virtqueue
354  * \req_idx descriptor index. It's the first index of this descriptor chain.
355  * \num_descs descriptor count. It's the count of the number of buffers in the chain.
356  * \buffer_id descriptor buffer ID.
357  * \length device write length. Specify the length of the buffer that has been initialized
358  * (written to) by the device
359  * \inflight_head the head idx of this IO inflight desc chain.
360  */
361 void vhost_vq_packed_ring_enqueue(struct spdk_vhost_session *vsession,
362 				  struct spdk_vhost_virtqueue *virtqueue,
363 				  uint16_t num_descs, uint16_t buffer_id,
364 				  uint32_t length, uint16_t inflight_head);
365 
366 /**
367  * Get subsequent descriptor from given table.
368  * \param desc current descriptor, will be set to the
369  * next descriptor (NULL in case this is the last
370  * descriptor in the chain or the next desc is invalid)
371  * \param desc_table descriptor table
372  * \param desc_table_size size of the *desc_table*
373  * \return 0 on success, -1 if given index is invalid
374  * The *desc* param will be set regardless of the
375  * return value.
376  */
377 int vhost_vring_desc_get_next(struct vring_desc **desc,
378 			      struct vring_desc *desc_table, uint32_t desc_table_size);
/* Report whether the device may write to the buffer this split-ring
 * descriptor points at (VRING_DESC_F_WRITE set). */
static inline bool
vhost_vring_desc_is_wr(struct vring_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_WRITE) != 0;
}
384 
385 int vhost_vring_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
386 			    uint16_t *iov_index, const struct vring_desc *desc);
387 
388 bool vhost_vq_packed_ring_is_avail(struct spdk_vhost_virtqueue *virtqueue);
389 
390 /**
391  * Get subsequent descriptor from vq or desc table.
392  * \param desc current descriptor, will be set to the
393  * next descriptor (NULL in case this is the last
394  * descriptor in the chain or the next desc is invalid)
395  * \req_idx index of current desc, will be set to the next
396  * index. If desc_table != NULL the req_idx is the the vring index
397  * or the req_idx is the desc_table index.
398  * \param desc_table descriptor table
399  * \param desc_table_size size of the *desc_table*
400  * \return 0 on success, -1 if given index is invalid
401  * The *desc* param will be set regardless of the
402  * return value.
403  */
404 int vhost_vring_packed_desc_get_next(struct vring_packed_desc **desc, uint16_t *req_idx,
405 				     struct spdk_vhost_virtqueue *vq,
406 				     struct vring_packed_desc *desc_table,
407 				     uint32_t desc_table_size);
408 
409 bool vhost_vring_packed_desc_is_wr(struct vring_packed_desc *cur_desc);
410 
411 int vhost_vring_packed_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
412 				   uint16_t *iov_index, const struct vring_packed_desc *desc);
413 
414 bool vhost_vring_inflight_desc_is_wr(spdk_vhost_inflight_desc *cur_desc);
415 
416 int vhost_vring_inflight_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
417 				     uint16_t *iov_index, const spdk_vhost_inflight_desc *desc);
418 
419 uint16_t vhost_vring_packed_desc_get_buffer_id(struct spdk_vhost_virtqueue *vq, uint16_t req_idx,
420 		uint16_t *num_descs);
421 
422 static inline bool
423 __attribute__((always_inline))
424 vhost_dev_has_feature(struct spdk_vhost_session *vsession, unsigned feature_id)
425 {
426 	return vsession->negotiated_features & (1ULL << feature_id);
427 }
428 
429 int vhost_scsi_controller_start(const char *name);
430 
431 int vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
432 		       const struct spdk_json_val *params, const struct spdk_vhost_dev_backend *backend,
433 		       const struct spdk_vhost_user_dev_backend *user_backend, bool delay);
434 
435 int vhost_dev_unregister(struct spdk_vhost_dev *vdev);
436 
437 void vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
438 
439 /*
440  * Set vhost session to run in interrupt or poll mode
441  */
442 void vhost_user_session_set_interrupt_mode(struct spdk_vhost_session *vsession,
443 		bool interrupt_mode);
444 
445 /*
446  * Memory registration functions used in start/stop device callbacks
447  */
448 void vhost_session_mem_register(struct rte_vhost_memory *mem);
449 void vhost_session_mem_unregister(struct rte_vhost_memory *mem);
450 
451 /*
452  * Call a function for each session of the provided vhost device.
453  * The function will be called one-by-one on each session's thread.
454  *
455  * \param vdev vhost device
456  * \param fn function to call on each session's thread
457  * \param cpl_fn function to be called at the end of the iteration on
458  * the vhost management thread.
459  * Optional, can be NULL.
460  * \param arg additional argument to the both callbacks
461  */
462 void vhost_user_dev_foreach_session(struct spdk_vhost_dev *dev,
463 				    spdk_vhost_session_fn fn,
464 				    spdk_vhost_dev_fn cpl_fn,
465 				    void *arg);
466 
467 /**
468  * Finish a blocking vhost_user_wait_for_session_stop() call and finally
469  * stop the session. This must be called on the session's lcore which
470  * used to receive all session-related messages (e.g. from
471  * vhost_user_dev_foreach_session()). After this call, the session-
472  * related messages will be once again processed by any arbitrary thread.
473  *
474  * Must be called under the vhost user device's session access lock.
475  *
476  * \param vsession vhost session
477  * \param response return code
478  */
479 void vhost_user_session_stop_done(struct spdk_vhost_session *vsession, int response);
480 
481 struct spdk_vhost_session *vhost_session_find_by_vid(int vid);
482 void vhost_session_install_rte_compat_hooks(struct spdk_vhost_session *vsession);
483 int vhost_register_unix_socket(const char *path, const char *ctrl_name,
484 			       uint64_t virtio_features, uint64_t disabled_features, uint64_t protocol_features);
485 int vhost_driver_unregister(const char *path);
486 int vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);
487 int vhost_get_negotiated_features(int vid, uint64_t *negotiated_features);
488 
489 int remove_vhost_controller(struct spdk_vhost_dev *vdev);
490 
491 struct spdk_io_channel *vhost_blk_get_io_channel(struct spdk_vhost_dev *vdev);
492 void vhost_blk_put_io_channel(struct spdk_io_channel *ch);
493 
494 /* The spdk_bdev pointer should only be used to retrieve
495  * the device properties, ex. number of blocks or I/O type supported. */
496 struct spdk_bdev *vhost_blk_get_bdev(struct spdk_vhost_dev *vdev);
497 
498 /* Function calls from vhost.c to rte_vhost_user.c,
499  * shall removed once virtio transport abstraction is complete. */
500 int vhost_user_session_set_coalescing(struct spdk_vhost_dev *dev,
501 				      struct spdk_vhost_session *vsession, void *ctx);
502 int vhost_user_dev_set_coalescing(struct spdk_vhost_user_dev *user_dev, uint32_t delay_base_us,
503 				  uint32_t iops_threshold);
504 int vhost_user_dev_create(struct spdk_vhost_dev *vdev, const char *name,
505 			  struct spdk_cpuset *cpumask,
506 			  const struct spdk_vhost_user_dev_backend *user_backend, bool dealy);
507 int vhost_user_dev_init(struct spdk_vhost_dev *vdev, const char *name,
508 			struct spdk_cpuset *cpumask, const struct spdk_vhost_user_dev_backend *user_backend);
509 int vhost_user_dev_start(struct spdk_vhost_dev *vdev);
510 bool vhost_user_dev_busy(struct spdk_vhost_dev *vdev);
511 int vhost_user_dev_unregister(struct spdk_vhost_dev *vdev);
512 int vhost_user_init(void);
513 void vhost_user_fini(spdk_vhost_fini_cb vhost_cb);
514 int vhost_user_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
515 			      uint32_t iops_threshold);
516 void vhost_user_get_coalescing(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
517 			       uint32_t *iops_threshold);
518 
519 int virtio_blk_construct_ctrlr(struct spdk_vhost_dev *vdev, const char *address,
520 			       struct spdk_cpuset *cpumask, const struct spdk_json_val *params,
521 			       const struct spdk_vhost_user_dev_backend *user_backend);
522 int virtio_blk_destroy_ctrlr(struct spdk_vhost_dev *vdev);
523 
524 struct spdk_vhost_blk_task;
525 
526 typedef void (*virtio_blk_request_cb)(uint8_t status, struct spdk_vhost_blk_task *task,
527 				      void *cb_arg);
528 
struct spdk_vhost_blk_task {
	/* Outstanding bdev I/O for this request. */
	struct spdk_bdev_io *bdev_io;
	/* Completion callback (receives the virtio-blk status) and its argument. */
	virtio_blk_request_cb cb;
	void *cb_arg;

	/* Virtio-blk status byte, written when the request completes.
	 * NOTE(review): presumably points into the guest-visible request
	 * buffer -- confirm in the transport code. */
	volatile uint8_t *status;

	/* for io wait */
	struct spdk_bdev_io_wait_entry bdev_io_wait;
	struct spdk_io_channel *bdev_io_wait_ch;
	struct spdk_vhost_dev *bdev_io_wait_vdev;

	/** Number of bytes that were written. */
	uint32_t used_len;
	/* Number of valid entries in iovs[]. */
	uint16_t iovcnt;
	struct iovec iovs[SPDK_VHOST_IOVS_MAX];

	/** Size of whole payload in bytes */
	uint32_t payload_size;
};
549 
550 int virtio_blk_process_request(struct spdk_vhost_dev *vdev, struct spdk_io_channel *ch,
551 			       struct spdk_vhost_blk_task *task, virtio_blk_request_cb cb, void *cb_arg);
552 
553 typedef void (*bdev_event_cb_complete)(struct spdk_vhost_dev *vdev, void *ctx);
554 
555 #define SPDK_VIRTIO_BLK_TRSTRING_MAX_LEN 32
556 
struct spdk_virtio_blk_transport_ops {
	/**
	 * Transport name
	 */
	char name[SPDK_VIRTIO_BLK_TRSTRING_MAX_LEN];

	/**
	 * Create a transport for the given transport opts
	 */
	struct spdk_virtio_blk_transport *(*create)(const struct spdk_json_val *params);

	/**
	 * Dump transport-specific opts into JSON
	 */
	void (*dump_opts)(struct spdk_virtio_blk_transport *transport, struct spdk_json_write_ctx *w);

	/**
	 * Destroy the transport
	 */
	int (*destroy)(struct spdk_virtio_blk_transport *transport,
		       spdk_vhost_fini_cb cb_fn);

	/**
	 * Create vhost block controller
	 */
	int (*create_ctrlr)(struct spdk_vhost_dev *vdev, struct spdk_cpuset *cpumask,
			    const char *address, const struct spdk_json_val *params,
			    void *custom_opts);

	/**
	 * Destroy vhost block controller
	 */
	int (*destroy_ctrlr)(struct spdk_vhost_dev *vdev);

	/**
	 * Signal removal of the bdev.
	 */
	void (*bdev_event)(enum spdk_bdev_event_type type, struct spdk_vhost_dev *vdev,
			   bdev_event_cb_complete cb, void *cb_arg);

	/**
	 * Set coalescing parameters.
	 */
	int (*set_coalescing)(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
			      uint32_t iops_threshold);

	/**
	 * Get coalescing parameters.
	 */
	void (*get_coalescing)(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
			       uint32_t *iops_threshold);
};
609 
/* A created virtio-blk transport instance. */
struct spdk_virtio_blk_transport {
	const struct spdk_virtio_blk_transport_ops	*ops;
	TAILQ_ENTRY(spdk_virtio_blk_transport)		tailq;
};

/* Node type for the list of registered transport ops
 * (see virtio_blk_transport_register()). */
struct virtio_blk_transport_ops_list_element {
	struct spdk_virtio_blk_transport_ops			ops;
	TAILQ_ENTRY(virtio_blk_transport_ops_list_element)	link;
};
619 
620 void virtio_blk_transport_register(const struct spdk_virtio_blk_transport_ops *ops);
621 int virtio_blk_transport_create(const char *transport_name, const struct spdk_json_val *params);
622 int virtio_blk_transport_destroy(struct spdk_virtio_blk_transport *transport,
623 				 spdk_vhost_fini_cb cb_fn);
624 struct spdk_virtio_blk_transport *virtio_blk_transport_get_first(void);
625 struct spdk_virtio_blk_transport *virtio_blk_transport_get_next(
626 	struct spdk_virtio_blk_transport *transport);
627 void virtio_blk_transport_dump_opts(struct spdk_virtio_blk_transport *transport,
628 				    struct spdk_json_write_ctx *w);
629 struct spdk_virtio_blk_transport *virtio_blk_tgt_get_transport(const char *transport_name);
630 const struct spdk_virtio_blk_transport_ops *virtio_blk_get_transport_ops(
631 	const char *transport_name);
632 
633 void vhost_session_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
634 
635 /*
636  * Macro used to register new transports.
637  */
638 #define SPDK_VIRTIO_BLK_TRANSPORT_REGISTER(name, transport_ops) \
639 static void __attribute__((constructor)) _virtio_blk_transport_register_##name(void) \
640 { \
641 	virtio_blk_transport_register(transport_ops); \
642 }
643 
644 #endif /* SPDK_VHOST_INTERNAL_H */
645