xref: /spdk/lib/vhost/vhost_internal.h (revision d987d777d6b8ce05f11cb1d90f1241bfecfc9af4)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #ifndef SPDK_VHOST_INTERNAL_H
7 #define SPDK_VHOST_INTERNAL_H
8 #include <linux/virtio_config.h>
9 
10 #include "spdk/stdinc.h"
11 
12 #include <rte_vhost.h>
13 
14 #include "spdk_internal/vhost_user.h"
15 #include "spdk/bdev.h"
16 #include "spdk/log.h"
17 #include "spdk/util.h"
18 #include "spdk/rpc.h"
19 #include "spdk/config.h"
20 
21 #define SPDK_VHOST_MAX_VQUEUES	256
22 #define SPDK_VHOST_MAX_VQ_SIZE	1024
23 
24 #define SPDK_VHOST_SCSI_CTRLR_MAX_DEVS 8
25 
26 #define SPDK_VHOST_IOVS_MAX 129
27 
28 #define SPDK_VHOST_VQ_MAX_SUBMISSIONS	32
29 
30 /*
31  * Rate at which stats are checked for interrupt coalescing.
32  */
33 #define SPDK_VHOST_STATS_CHECK_INTERVAL_MS 10
34 /*
35  * Default threshold at which interrupts start to be coalesced.
36  */
37 #define SPDK_VHOST_VQ_IOPS_COALESCING_THRESHOLD 60000
38 
39 /*
40  * Timeout in seconds for vhost-user session stop message.
41  */
42 #define SPDK_VHOST_SESSION_STOP_TIMEOUT_IN_SEC 3
43 /*
44  * Stop retry timeout in seconds, this value should be greater than SPDK_VHOST_SESSION_STOP_TIMEOUT_IN_SEC.
45  */
46 #define SPDK_VHOST_SESSION_STOP_RETRY_TIMEOUT_IN_SEC (SPDK_VHOST_SESSION_STOP_TIMEOUT_IN_SEC + 1)
47 /*
48  * Stop retry period in microseconds
49  */
50 #define SPDK_VHOST_SESSION_STOP_RETRY_PERIOD_IN_US 1000
51 
52 /*
53  * Currently coalescing is not used by default.
54  * Setting this to value > 0 here or by RPC will enable coalescing.
55  */
56 #define SPDK_VHOST_COALESCING_DELAY_BASE_US 0
57 
58 #define SPDK_VHOST_FEATURES ((1ULL << VHOST_F_LOG_ALL) | \
59 	(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
60 	(1ULL << VIRTIO_F_VERSION_1) | \
61 	(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \
62 	(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
63 	(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
64 	(1ULL << VIRTIO_F_ANY_LAYOUT))
65 
66 #define SPDK_VHOST_DISABLED_FEATURES ((1ULL << VIRTIO_RING_F_EVENT_IDX) | \
67 	(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY))
68 
69 #define VRING_DESC_F_AVAIL	(1ULL << VRING_PACKED_DESC_F_AVAIL)
70 #define VRING_DESC_F_USED	(1ULL << VRING_PACKED_DESC_F_USED)
71 #define VRING_DESC_F_AVAIL_USED	(VRING_DESC_F_AVAIL | VRING_DESC_F_USED)
72 
73 typedef struct rte_vhost_resubmit_desc spdk_vhost_resubmit_desc;
74 typedef struct rte_vhost_resubmit_info spdk_vhost_resubmit_info;
75 typedef struct rte_vhost_inflight_desc_packed	spdk_vhost_inflight_desc;
76 
77 struct spdk_vhost_virtqueue {
78 	struct rte_vhost_vring vring;
79 	struct rte_vhost_ring_inflight vring_inflight;
80 	uint16_t last_avail_idx;
81 	uint16_t last_used_idx;
82 
83 	struct {
84 		/* To mark a descriptor as available in packed ring
85 		 * Equal to avail_wrap_counter in spec.
86 		 */
87 		uint8_t avail_phase	: 1;
88 		/* To mark a descriptor as used in packed ring
89 		 * Equal to used_wrap_counter in spec.
90 		 */
91 		uint8_t used_phase	: 1;
92 		uint8_t padding		: 5;
93 		bool packed_ring	: 1;
94 	} packed;
95 
96 	void *tasks;
97 
98 	/* Request count from last stats check */
99 	uint32_t req_cnt;
100 
101 	/* Request count from last event */
102 	uint16_t used_req_cnt;
103 
104 	/* How long interrupt is delayed */
105 	uint32_t irq_delay_time;
106 
107 	/* Next time when we need to send event */
108 	uint64_t next_event_time;
109 
110 	/* Associated vhost_virtqueue in the virtio device's virtqueue list */
111 	uint32_t vring_idx;
112 
113 	struct spdk_vhost_session *vsession;
114 
115 	struct spdk_interrupt *intr;
116 } __attribute((aligned(SPDK_CACHE_LINE_SIZE)));
117 
/* State of a single vhost-user connection (one client attached to a
 * vhost device). */
struct spdk_vhost_session {
	/* Device this session belongs to. */
	struct spdk_vhost_dev *vdev;

	/* rte_vhost connection ID. */
	int vid;

	/* Unique session ID. */
	uint64_t id;
	/* Unique session name. */
	char *name;

	/* Session has been started. */
	bool started;
	/* Session start is in progress. */
	bool starting;
	/* Session must be stopped and started again. */
	bool needs_restart;

	/* Guest memory regions for this connection. */
	struct rte_vhost_memory *mem;

	/* Outstanding task counter. */
	int task_cnt;

	/* Number of virtqueues currently usable in virtqueue[] below. */
	uint16_t max_queues;
	/* Maximum number of queues before restart, used with 'needs_restart' flag */
	uint16_t original_max_queues;

	/* Virtio feature bits negotiated with the guest driver. */
	uint64_t negotiated_features;

	/* Local copy of device coalescing settings. */
	uint32_t coalescing_delay_time_base;
	uint32_t coalescing_io_rate_threshold;

	/* Next time when stats for event coalescing will be checked. */
	uint64_t next_stats_check_time;

	/* Interval used for event coalescing checking. */
	uint64_t stats_check_interval;

	/* Session's stop poller will only try limited times to destroy the session. */
	uint32_t stop_retry_count;

	/**
	 * DPDK calls our callbacks synchronously but the work those callbacks
	 * perform needs to be async. Luckily, all DPDK callbacks are called on
	 * a DPDK-internal pthread and only related to the current session, so we'll
	 * just wait on a semaphore of this session in there.
	 */
	sem_t dpdk_sem;

	/** Return code for the current DPDK callback */
	int dpdk_response;

	/* Per-queue state; only the first max_queues entries are in use. */
	struct spdk_vhost_virtqueue virtqueue[SPDK_VHOST_MAX_VQUEUES];

	/* Link in the owning device's vsessions list. */
	TAILQ_ENTRY(spdk_vhost_session) tailq;
};
171 
/* vhost-user transport-specific state of a vhost device,
 * retrieved from spdk_vhost_dev->ctxt via to_user_dev(). */
struct spdk_vhost_user_dev {
	/* The generic vhost device this state belongs to. */
	struct spdk_vhost_dev *vdev;

	/* Transport-specific session callbacks. */
	const struct spdk_vhost_user_dev_backend *user_backend;

	/* Saved original values used to setup coalescing to avoid integer
	 * rounding issues during save/load config.
	 */
	uint32_t coalescing_delay_us;
	uint32_t coalescing_iops_threshold;

	/* Whether the device is currently registered. */
	bool registered;

	/* Use this lock to protect multiple sessions. */
	pthread_mutex_t lock;

	/* Current connections to the device */
	TAILQ_HEAD(, spdk_vhost_session) vsessions;

	/* Increment-only session counter */
	uint64_t vsessions_num;

	/* Number of pending asynchronous operations */
	uint32_t pending_async_op_num;
};
197 
/* Generic (transport-independent) vhost device/controller. */
struct spdk_vhost_dev {
	/* Controller name. */
	char *name;
	/* Controller endpoint path. */
	char *path;

	/* Whether the default cpumask is used for this device. */
	bool use_default_cpumask;
	/* Thread associated with this device. */
	struct spdk_thread *thread;

	/* Feature bitmasks; passed to vhost_register_unix_socket(). */
	uint64_t virtio_features;
	uint64_t disabled_features;
	uint64_t protocol_features;

	/* Device-type (blk/scsi) specific operations. */
	const struct spdk_vhost_dev_backend *backend;

	/* Context passed from transport */
	void *ctxt;

	TAILQ_ENTRY(spdk_vhost_dev) tailq;
};
216 
/**
 * Get the vhost-user transport state of a device.
 *
 * \param vdev vhost device; must not be NULL.
 * \return the spdk_vhost_user_dev stored in vdev->ctxt.
 */
static inline struct spdk_vhost_user_dev *
to_user_dev(struct spdk_vhost_dev *vdev)
{
	assert(vdev != NULL);
	return vdev->ctxt;
}
223 
224 /**
225  * \param vdev vhost device.
226  * \param vsession vhost session.
227  * \param arg user-provided parameter.
228  *
229  * \return negative values will break the foreach call, meaning
230  * the function won't be called again. Return codes zero and
231  * positive don't have any effect.
232  */
233 typedef int (*spdk_vhost_session_fn)(struct spdk_vhost_dev *vdev,
234 				     struct spdk_vhost_session *vsession,
235 				     void *arg);
236 
237 /**
238  * \param vdev vhost device.
239  * \param arg user-provided parameter.
240  */
241 typedef void (*spdk_vhost_dev_fn)(struct spdk_vhost_dev *vdev, void *arg);
242 
/* Per-transport (blk/scsi) session-management callbacks for
 * vhost-user devices. */
struct spdk_vhost_user_dev_backend {
	/**
	 * Size of additional per-session context data
	 * allocated whenever a new client connects.
	 */
	size_t session_ctx_size;

	/* Start a session (called via the session-fn machinery). */
	spdk_vhost_session_fn start_session;
	/* Stop a session (called via the session-fn machinery). */
	spdk_vhost_session_fn stop_session;
	/* Allocate task objects for virtqueue \c qid of \c vsession. */
	int (*alloc_vq_tasks)(struct spdk_vhost_session *vsession, uint16_t qid);
	/* Called when virtqueue \c vq is enabled. */
	int (*enable_vq)(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *vq);
};
255 
/* Type of device a vhost backend exposes. */
enum vhost_backend_type {
	VHOST_BACKEND_BLK = 0,
	VHOST_BACKEND_SCSI,
};
260 
/* Device-type specific operations of a vhost device. */
struct spdk_vhost_dev_backend {
	enum vhost_backend_type type;

	/* Read up to \c len bytes of the virtio device config space into \c config. */
	int (*vhost_get_config)(struct spdk_vhost_dev *vdev, uint8_t *config, uint32_t len);
	/* Write \c size bytes at \c offset of the virtio device config space. */
	int (*vhost_set_config)(struct spdk_vhost_dev *vdev, uint8_t *config,
				uint32_t offset, uint32_t size, uint32_t flags);

	/* Dump device-specific information as JSON. */
	void (*dump_info_json)(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
	/* Write the JSON config needed to recreate this device. */
	void (*write_config_json)(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
	/* Remove (destroy) the device. */
	int (*remove_device)(struct spdk_vhost_dev *vdev);
	/* Set interrupt coalescing parameters. */
	int (*set_coalescing)(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
			      uint32_t iops_threshold);
	/* Get interrupt coalescing parameters. */
	void (*get_coalescing)(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
			       uint32_t *iops_threshold);
};
276 
277 void *vhost_gpa_to_vva(struct spdk_vhost_session *vsession, uint64_t addr, uint64_t len);
278 
279 uint16_t vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *vq, uint16_t *reqs,
280 				 uint16_t reqs_len);
281 
282 /**
283  * Get a virtio split descriptor at given index in given virtqueue.
284  * The descriptor will provide access to the entire descriptor
285  * chain. The subsequent descriptors are accessible via
286  * \c spdk_vhost_vring_desc_get_next.
287  * \param vsession vhost session
288  * \param vq virtqueue
289  * \param req_idx descriptor index
290  * \param desc pointer to be set to the descriptor
291  * \param desc_table descriptor table to be used with
292  * \c spdk_vhost_vring_desc_get_next. This might be either
293  * default virtqueue descriptor table or per-chain indirect
294  * table.
295  * \param desc_table_size size of the *desc_table*
296  * \return 0 on success, -1 if given index is invalid.
297  * If -1 is returned, the content of params is undefined.
298  */
299 int vhost_vq_get_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *vq,
300 		      uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table,
301 		      uint32_t *desc_table_size);
302 
303 /**
304  * Get a virtio packed descriptor at given index in given virtqueue.
305  * The descriptor will provide access to the entire descriptor
306  * chain. The subsequent descriptors are accessible via
307  * \c vhost_vring_packed_desc_get_next.
308  * \param vsession vhost session
309  * \param vq virtqueue
310  * \param req_idx descriptor index
311  * \param desc pointer to be set to the descriptor
312  * \param desc_table descriptor table to be used with
313  * \c spdk_vhost_vring_desc_get_next. This might be either
314  * \c NULL or per-chain indirect table.
315  * \param desc_table_size size of the *desc_table*
316  * \return 0 on success, -1 if given index is invalid.
317  * If -1 is returned, the content of params is undefined.
318  */
319 int vhost_vq_get_desc_packed(struct spdk_vhost_session *vsession,
320 			     struct spdk_vhost_virtqueue *virtqueue,
321 			     uint16_t req_idx, struct vring_packed_desc **desc,
322 			     struct vring_packed_desc **desc_table, uint32_t *desc_table_size);
323 
324 int vhost_inflight_queue_get_desc(struct spdk_vhost_session *vsession,
325 				  spdk_vhost_inflight_desc *desc_array,
326 				  uint16_t req_idx, spdk_vhost_inflight_desc **desc,
327 				  struct vring_packed_desc  **desc_table, uint32_t *desc_table_size);
328 
329 /**
330  * Send IRQ/call client (if pending) for \c vq.
331  * \param vsession vhost session
332  * \param vq virtqueue
333  * \return
334  *   0 - if no interrupt was signalled
335  *   1 - if interrupt was signalled
336  */
337 int vhost_vq_used_signal(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *vq);
338 
339 /**
340  * Send IRQs for the queue that need to be signaled.
341  * \param vq virtqueue
342  */
343 void vhost_session_vq_used_signal(struct spdk_vhost_virtqueue *virtqueue);
344 
345 void vhost_vq_used_ring_enqueue(struct spdk_vhost_session *vsession,
346 				struct spdk_vhost_virtqueue *vq,
347 				uint16_t id, uint32_t len);
348 
349 /**
350  * Enqueue the entry to the used ring when device complete the request.
351  * \param vsession vhost session
352  * \param vq virtqueue
 * \param req_idx descriptor index. It's the first index of this descriptor chain.
 * \param num_descs descriptor count. It's the count of the number of buffers in the chain.
 * \param buffer_id descriptor buffer ID.
 * \param length device write length. Specify the length of the buffer that has been initialized
 * (written to) by the device
 * \param inflight_head the head idx of this IO inflight desc chain.
359  */
360 void vhost_vq_packed_ring_enqueue(struct spdk_vhost_session *vsession,
361 				  struct spdk_vhost_virtqueue *virtqueue,
362 				  uint16_t num_descs, uint16_t buffer_id,
363 				  uint32_t length, uint16_t inflight_head);
364 
365 /**
366  * Get subsequent descriptor from given table.
367  * \param desc current descriptor, will be set to the
368  * next descriptor (NULL in case this is the last
369  * descriptor in the chain or the next desc is invalid)
370  * \param desc_table descriptor table
371  * \param desc_table_size size of the *desc_table*
372  * \return 0 on success, -1 if given index is invalid
373  * The *desc* param will be set regardless of the
374  * return value.
375  */
376 int vhost_vring_desc_get_next(struct vring_desc **desc,
377 			      struct vring_desc *desc_table, uint32_t desc_table_size);
/**
 * Check whether a split-ring descriptor is device-writable.
 *
 * \param cur_desc descriptor to inspect
 * \return true if the VRING_DESC_F_WRITE flag is set.
 */
static inline bool
vhost_vring_desc_is_wr(struct vring_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_WRITE) != 0;
}
383 
384 int vhost_vring_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
385 			    uint16_t *iov_index, const struct vring_desc *desc);
386 
387 bool vhost_vq_packed_ring_is_avail(struct spdk_vhost_virtqueue *virtqueue);
388 
389 /**
390  * Get subsequent descriptor from vq or desc table.
391  * \param desc current descriptor, will be set to the
392  * next descriptor (NULL in case this is the last
393  * descriptor in the chain or the next desc is invalid)
 * \param req_idx index of current desc, will be set to the next
 * index. If desc_table != NULL the req_idx is the vring index
396  * or the req_idx is the desc_table index.
397  * \param desc_table descriptor table
398  * \param desc_table_size size of the *desc_table*
399  * \return 0 on success, -1 if given index is invalid
400  * The *desc* param will be set regardless of the
401  * return value.
402  */
403 int vhost_vring_packed_desc_get_next(struct vring_packed_desc **desc, uint16_t *req_idx,
404 				     struct spdk_vhost_virtqueue *vq,
405 				     struct vring_packed_desc *desc_table,
406 				     uint32_t desc_table_size);
407 
408 bool vhost_vring_packed_desc_is_wr(struct vring_packed_desc *cur_desc);
409 
410 int vhost_vring_packed_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
411 				   uint16_t *iov_index, const struct vring_packed_desc *desc);
412 
413 bool vhost_vring_inflight_desc_is_wr(spdk_vhost_inflight_desc *cur_desc);
414 
415 int vhost_vring_inflight_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
416 				     uint16_t *iov_index, const spdk_vhost_inflight_desc *desc);
417 
418 uint16_t vhost_vring_packed_desc_get_buffer_id(struct spdk_vhost_virtqueue *vq, uint16_t req_idx,
419 		uint16_t *num_descs);
420 
421 static inline bool
422 __attribute__((always_inline))
423 vhost_dev_has_feature(struct spdk_vhost_session *vsession, unsigned feature_id)
424 {
425 	return vsession->negotiated_features & (1ULL << feature_id);
426 }
427 
428 int vhost_scsi_controller_start(const char *name);
429 
430 int vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
431 		       const struct spdk_json_val *params, const struct spdk_vhost_dev_backend *backend,
432 		       const struct spdk_vhost_user_dev_backend *user_backend, bool delay);
433 
434 int vhost_dev_unregister(struct spdk_vhost_dev *vdev);
435 
436 void vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
437 
438 /*
439  * Set vhost session to run in interrupt or poll mode
440  */
441 void vhost_user_session_set_interrupt_mode(struct spdk_vhost_session *vsession,
442 		bool interrupt_mode);
443 
444 /*
445  * Memory registration functions used in start/stop device callbacks
446  */
447 void vhost_session_mem_register(struct rte_vhost_memory *mem);
448 void vhost_session_mem_unregister(struct rte_vhost_memory *mem);
449 
450 /*
451  * Call a function for each session of the provided vhost device.
452  * The function will be called one-by-one on each session's thread.
453  *
454  * \param vdev vhost device
455  * \param fn function to call on each session's thread
456  * \param cpl_fn function to be called at the end of the iteration on
457  * the vhost management thread.
458  * Optional, can be NULL.
459  * \param arg additional argument to the both callbacks
460  */
461 void vhost_user_dev_foreach_session(struct spdk_vhost_dev *dev,
462 				    spdk_vhost_session_fn fn,
463 				    spdk_vhost_dev_fn cpl_fn,
464 				    void *arg);
465 
466 /**
467  * Finish a blocking vhost_user_wait_for_session_stop() call and finally
468  * stop the session. This must be called on the session's lcore which
469  * used to receive all session-related messages (e.g. from
470  * vhost_user_dev_foreach_session()). After this call, the session-
471  * related messages will be once again processed by any arbitrary thread.
472  *
473  * Must be called under the vhost user device's session access lock.
474  *
475  * \param vsession vhost session
476  * \param response return code
477  */
478 void vhost_user_session_stop_done(struct spdk_vhost_session *vsession, int response);
479 
480 struct spdk_vhost_session *vhost_session_find_by_vid(int vid);
481 void vhost_session_install_rte_compat_hooks(struct spdk_vhost_session *vsession);
482 int vhost_register_unix_socket(const char *path, const char *ctrl_name,
483 			       uint64_t virtio_features, uint64_t disabled_features, uint64_t protocol_features);
484 int vhost_driver_unregister(const char *path);
485 int vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);
486 int vhost_get_negotiated_features(int vid, uint64_t *negotiated_features);
487 
488 int remove_vhost_controller(struct spdk_vhost_dev *vdev);
489 
490 struct spdk_io_channel *vhost_blk_get_io_channel(struct spdk_vhost_dev *vdev);
491 void vhost_blk_put_io_channel(struct spdk_io_channel *ch);
492 
493 /* The spdk_bdev pointer should only be used to retrieve
494  * the device properties, ex. number of blocks or I/O type supported. */
495 struct spdk_bdev *vhost_blk_get_bdev(struct spdk_vhost_dev *vdev);
496 
497 /* Function calls from vhost.c to rte_vhost_user.c,
 * shall be removed once virtio transport abstraction is complete. */
499 int vhost_user_session_set_coalescing(struct spdk_vhost_dev *dev,
500 				      struct spdk_vhost_session *vsession, void *ctx);
501 int vhost_user_dev_set_coalescing(struct spdk_vhost_user_dev *user_dev, uint32_t delay_base_us,
502 				  uint32_t iops_threshold);
int vhost_user_dev_create(struct spdk_vhost_dev *vdev, const char *name,
			  struct spdk_cpuset *cpumask,
			  const struct spdk_vhost_user_dev_backend *user_backend, bool delay);
506 int vhost_user_dev_init(struct spdk_vhost_dev *vdev, const char *name,
507 			struct spdk_cpuset *cpumask, const struct spdk_vhost_user_dev_backend *user_backend);
508 int vhost_user_dev_start(struct spdk_vhost_dev *vdev);
509 int vhost_user_dev_unregister(struct spdk_vhost_dev *vdev);
510 int vhost_user_init(void);
511 void vhost_user_fini(spdk_vhost_fini_cb vhost_cb);
512 int vhost_user_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
513 			      uint32_t iops_threshold);
514 void vhost_user_get_coalescing(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
515 			       uint32_t *iops_threshold);
516 
517 int virtio_blk_construct_ctrlr(struct spdk_vhost_dev *vdev, const char *address,
518 			       struct spdk_cpuset *cpumask, const struct spdk_json_val *params,
519 			       const struct spdk_vhost_user_dev_backend *user_backend);
520 int virtio_blk_destroy_ctrlr(struct spdk_vhost_dev *vdev);
521 
522 struct spdk_vhost_blk_task;
523 
524 typedef void (*virtio_blk_request_cb)(uint8_t status, struct spdk_vhost_blk_task *task,
525 				      void *cb_arg);
526 
/* Per-request state of a virtio-blk request being processed. */
struct spdk_vhost_blk_task {
	/* bdev I/O submitted for this request. */
	struct spdk_bdev_io *bdev_io;
	/* Completion callback and its argument. */
	virtio_blk_request_cb cb;
	void *cb_arg;

	/* Request status byte; volatile, presumably shared with the
	 * guest-visible buffer — NOTE(review): confirm against transport code. */
	volatile uint8_t *status;

	/* for io wait */
	struct spdk_bdev_io_wait_entry bdev_io_wait;
	struct spdk_io_channel *bdev_io_wait_ch;
	struct spdk_vhost_dev *bdev_io_wait_vdev;

	/** Number of bytes that were written. */
	uint32_t used_len;
	/* Number of valid entries in iovs[]. */
	uint16_t iovcnt;
	struct iovec iovs[SPDK_VHOST_IOVS_MAX];

	/** Size of whole payload in bytes */
	uint32_t payload_size;
};
547 
548 int virtio_blk_process_request(struct spdk_vhost_dev *vdev, struct spdk_io_channel *ch,
549 			       struct spdk_vhost_blk_task *task, virtio_blk_request_cb cb, void *cb_arg);
550 
551 typedef void (*bdev_event_cb_complete)(struct spdk_vhost_dev *vdev, void *ctx);
552 
553 #define SPDK_VIRTIO_BLK_TRSTRING_MAX_LEN 32
554 
/* Operations implemented by a virtio-blk transport. */
struct spdk_virtio_blk_transport_ops {
	/**
	 * Transport name
	 */
	char name[SPDK_VIRTIO_BLK_TRSTRING_MAX_LEN];

	/**
	 * Create a transport for the given transport opts
	 */
	struct spdk_virtio_blk_transport *(*create)(const struct spdk_json_val *params);

	/**
	 * Dump transport-specific opts into JSON
	 */
	void (*dump_opts)(struct spdk_virtio_blk_transport *transport, struct spdk_json_write_ctx *w);

	/**
	 * Destroy the transport
	 */
	int (*destroy)(struct spdk_virtio_blk_transport *transport,
		       spdk_vhost_fini_cb cb_fn);

	/**
	 * Create vhost block controller
	 */
	int (*create_ctrlr)(struct spdk_vhost_dev *vdev, struct spdk_cpuset *cpumask,
			    const char *address, const struct spdk_json_val *params,
			    void *custom_opts);

	/**
	 * Destroy vhost block controller
	 */
	int (*destroy_ctrlr)(struct spdk_vhost_dev *vdev);

	/*
	 * Signal removal of the bdev.
	 */
	void (*bdev_event)(enum spdk_bdev_event_type type, struct spdk_vhost_dev *vdev,
			   bdev_event_cb_complete cb, void *cb_arg);

	/**
	 * Set coalescing parameters.
	 */
	int (*set_coalescing)(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
			      uint32_t iops_threshold);

	/**
	 * Get coalescing parameters.
	 */
	void (*get_coalescing)(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
			       uint32_t *iops_threshold);
};
607 
/* A created virtio-blk transport instance. */
struct spdk_virtio_blk_transport {
	const struct spdk_virtio_blk_transport_ops	*ops;
	TAILQ_ENTRY(spdk_virtio_blk_transport)		tailq;
};
612 
/* Node in the list of registered transport ops (see
 * virtio_blk_transport_register()). */
struct virtio_blk_transport_ops_list_element {
	struct spdk_virtio_blk_transport_ops			ops;
	TAILQ_ENTRY(virtio_blk_transport_ops_list_element)	link;
};
617 
618 void virtio_blk_transport_register(const struct spdk_virtio_blk_transport_ops *ops);
619 int virtio_blk_transport_create(const char *transport_name, const struct spdk_json_val *params);
620 int virtio_blk_transport_destroy(struct spdk_virtio_blk_transport *transport,
621 				 spdk_vhost_fini_cb cb_fn);
622 struct spdk_virtio_blk_transport *virtio_blk_transport_get_first(void);
623 struct spdk_virtio_blk_transport *virtio_blk_transport_get_next(
624 	struct spdk_virtio_blk_transport *transport);
625 void virtio_blk_transport_dump_opts(struct spdk_virtio_blk_transport *transport,
626 				    struct spdk_json_write_ctx *w);
627 struct spdk_virtio_blk_transport *virtio_blk_tgt_get_transport(const char *transport_name);
628 const struct spdk_virtio_blk_transport_ops *virtio_blk_get_transport_ops(
629 	const char *transport_name);
630 
631 void vhost_session_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
632 
633 /*
634  * Macro used to register new transports.
635  */
636 #define SPDK_VIRTIO_BLK_TRANSPORT_REGISTER(name, transport_ops) \
637 static void __attribute__((constructor)) _virtio_blk_transport_register_##name(void) \
638 { \
639 	virtio_blk_transport_register(transport_ops); \
640 }
641 
642 #endif /* SPDK_VHOST_INTERNAL_H */
643