xref: /spdk/lib/vhost/vhost_internal.h (revision 7025ceb9c119a6da0b6ee2013b6ae94b51fac2df)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #ifndef SPDK_VHOST_INTERNAL_H
7 #define SPDK_VHOST_INTERNAL_H
8 #include <linux/virtio_config.h>
9 
10 #include "spdk/stdinc.h"
11 
12 #include <rte_vhost.h>
13 
14 #include "spdk_internal/vhost_user.h"
15 #include "spdk/bdev.h"
16 #include "spdk/log.h"
17 #include "spdk/util.h"
18 #include "spdk/rpc.h"
19 #include "spdk/config.h"
20 
/* Compile-time limits for vhost devices. */
#define SPDK_VHOST_MAX_VQUEUES	256
#define SPDK_VHOST_MAX_VQ_SIZE	1024

/* Maximum number of SCSI target devices per vhost-scsi controller. */
#define SPDK_VHOST_SCSI_CTRLR_MAX_DEVS 8

/* Maximum iovec entries a single request may use (see spdk_vhost_blk_task.iovs). */
#define SPDK_VHOST_IOVS_MAX 129

#define SPDK_VHOST_VQ_MAX_SUBMISSIONS	32

/*
 * Rate at which stats are checked for interrupt coalescing.
 */
#define SPDK_VHOST_STATS_CHECK_INTERVAL_MS 10
/*
 * Default threshold at which interrupts start to be coalesced.
 */
#define SPDK_VHOST_VQ_IOPS_COALESCING_THRESHOLD 60000

/*
 * Currently coalescing is not used by default.
 * Setting this to value > 0 here or by RPC will enable coalescing.
 */
#define SPDK_VHOST_COALESCING_DELAY_BASE_US 0

/* Virtio feature bits offered by default. */
#define SPDK_VHOST_FEATURES ((1ULL << VHOST_F_LOG_ALL) | \
	(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
	(1ULL << VIRTIO_F_VERSION_1) | \
	(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \
	(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
	(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
	(1ULL << VIRTIO_F_ANY_LAYOUT))

/* Feature bits disabled by default (see spdk_vhost_dev->disabled_features). */
#define SPDK_VHOST_DISABLED_FEATURES ((1ULL << VIRTIO_RING_F_EVENT_IDX) | \
	(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY))

/* Packed-ring descriptor flag masks built from the avail/used wrap bits. */
#define VRING_DESC_F_AVAIL	(1ULL << VRING_PACKED_DESC_F_AVAIL)
#define VRING_DESC_F_USED	(1ULL << VRING_PACKED_DESC_F_USED)
#define VRING_DESC_F_AVAIL_USED	(VRING_DESC_F_AVAIL | VRING_DESC_F_USED)

/* Shorthand aliases for the rte_vhost inflight/resubmit types. */
typedef struct rte_vhost_resubmit_desc spdk_vhost_resubmit_desc;
typedef struct rte_vhost_resubmit_info spdk_vhost_resubmit_info;
typedef struct rte_vhost_inflight_desc_packed	spdk_vhost_inflight_desc;
63 
64 struct spdk_vhost_virtqueue {
65 	struct rte_vhost_vring vring;
66 	struct rte_vhost_ring_inflight vring_inflight;
67 	uint16_t last_avail_idx;
68 	uint16_t last_used_idx;
69 
70 	struct {
71 		/* To mark a descriptor as available in packed ring
72 		 * Equal to avail_wrap_counter in spec.
73 		 */
74 		uint8_t avail_phase	: 1;
75 		/* To mark a descriptor as used in packed ring
76 		 * Equal to used_wrap_counter in spec.
77 		 */
78 		uint8_t used_phase	: 1;
79 		uint8_t padding		: 5;
80 		bool packed_ring	: 1;
81 	} packed;
82 
83 	void *tasks;
84 
85 	/* Request count from last stats check */
86 	uint32_t req_cnt;
87 
88 	/* Request count from last event */
89 	uint16_t used_req_cnt;
90 
91 	/* How long interrupt is delayed */
92 	uint32_t irq_delay_time;
93 
94 	/* Next time when we need to send event */
95 	uint64_t next_event_time;
96 
97 	/* Associated vhost_virtqueue in the virtio device's virtqueue list */
98 	uint32_t vring_idx;
99 
100 	struct spdk_vhost_session *vsession;
101 
102 	struct spdk_interrupt *intr;
103 } __attribute((aligned(SPDK_CACHE_LINE_SIZE)));
104 
/* State of a single vhost-user connection (session) to a device. */
struct spdk_vhost_session {
	/* Device this session is connected to. */
	struct spdk_vhost_dev *vdev;

	/* rte_vhost connection ID. */
	int vid;

	/* Unique session ID. */
	uint64_t id;
	/* Unique session name. */
	char *name;

	/* Session lifecycle state flags. */
	bool started;
	bool starting;
	bool interrupt_mode;
	bool needs_restart;

	/* Guest memory regions registered for this session. */
	struct rte_vhost_memory *mem;

	/* Outstanding task count. */
	int task_cnt;

	uint16_t max_queues;
	/* Maximum number of queues before restart, used with 'needs_restart' flag */
	uint16_t original_max_queues;

	/* Virtio feature bits negotiated with the guest driver. */
	uint64_t negotiated_features;

	/* Local copy of device coalescing settings. */
	uint32_t coalescing_delay_time_base;
	uint32_t coalescing_io_rate_threshold;

	/* Next time when stats for event coalescing will be checked. */
	uint64_t next_stats_check_time;

	/* Interval used for event coalescing checking. */
	uint64_t stats_check_interval;

	/* Session's stop poller will only try limited times to destroy the session. */
	uint32_t stop_retry_count;

	/**
	 * DPDK calls our callbacks synchronously but the work those callbacks
	 * perform needs to be async. Luckily, all DPDK callbacks are called on
	 * a DPDK-internal pthread and only related to the current session, so we'll
	 * just wait on a semaphore of this session in there.
	 */
	sem_t dpdk_sem;

	/** Return code for the current DPDK callback */
	int dpdk_response;

	/* Per-queue state; only the first 'max_queues' entries are in use. */
	struct spdk_vhost_virtqueue virtqueue[SPDK_VHOST_MAX_VQUEUES];

	/* Link in spdk_vhost_user_dev.vsessions. */
	TAILQ_ENTRY(spdk_vhost_session) tailq;
};
159 
/* vhost-user transport specific device state, stored in
 * spdk_vhost_dev->ctxt (see to_user_dev()).
 */
struct spdk_vhost_user_dev {
	/* Back-pointer to the generic device. */
	struct spdk_vhost_dev *vdev;

	const struct spdk_vhost_user_dev_backend *user_backend;

	/* Saved original values used to setup coalescing to avoid integer
	 * rounding issues during save/load config.
	 */
	uint32_t coalescing_delay_us;
	uint32_t coalescing_iops_threshold;

	/* Device registration state. */
	bool registered;

	/* Use this lock to protect multiple sessions. */
	pthread_mutex_t lock;

	/* Current connections to the device */
	TAILQ_HEAD(, spdk_vhost_session) vsessions;

	/* Increment-only session counter */
	uint64_t vsessions_num;

	/* Number of pending asynchronous operations */
	uint32_t pending_async_op_num;
};
185 
/* Generic (transport-agnostic) vhost device. */
struct spdk_vhost_dev {
	/* Controller name. */
	char *name;
	/* Device path (presumably the unix domain socket path —
	 * see vhost_register_unix_socket()).
	 */
	char *path;

	struct spdk_thread *thread;

	/* Feature masks handed to the transport on registration
	 * (see vhost_register_unix_socket()).
	 */
	uint64_t virtio_features;
	uint64_t disabled_features;
	uint64_t protocol_features;

	const struct spdk_vhost_dev_backend *backend;

	/* Context passed from transport */
	void *ctxt;

	TAILQ_ENTRY(spdk_vhost_dev) tailq;
};
203 
204 static inline struct spdk_vhost_user_dev *
205 to_user_dev(struct spdk_vhost_dev *vdev)
206 {
207 	assert(vdev != NULL);
208 	return vdev->ctxt;
209 }
210 
/**
 * Per-session callback invoked by vhost_user_dev_foreach_session()
 * on each session's thread.
 *
 * \param vdev vhost device.
 * \param vsession vhost session.
 * \param arg user-provided parameter.
 *
 * \return negative values will break the foreach call, meaning
 * the function won't be called again. Return codes zero and
 * positive don't have any effect.
 */
typedef int (*spdk_vhost_session_fn)(struct spdk_vhost_dev *vdev,
				     struct spdk_vhost_session *vsession,
				     void *arg);
223 
/**
 * Device-level callback, e.g. invoked on the vhost management thread
 * when a foreach-session iteration completes
 * (see vhost_user_dev_foreach_session()).
 *
 * \param vdev vhost device.
 * \param arg user-provided parameter.
 */
typedef void (*spdk_vhost_dev_fn)(struct spdk_vhost_dev *vdev, void *arg);
229 
/* Callbacks implemented by each vhost-user device type (see
 * enum vhost_backend_type).
 */
struct spdk_vhost_user_dev_backend {
	/**
	 * Size of additional per-session context data
	 * allocated whenever a new client connects.
	 */
	size_t session_ctx_size;

	/* Start/stop processing of a session. */
	spdk_vhost_session_fn start_session;
	spdk_vhost_session_fn stop_session;
	/* Allocate per-request task structures for virtqueue \c qid. */
	int (*alloc_vq_tasks)(struct spdk_vhost_session *vsession, uint16_t qid);
	/* Register an interrupt handler for the given virtqueue. */
	void (*register_vq_interrupt)(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *vq);
};
242 
/* Type of the vhost device backend. */
enum vhost_backend_type {
	VHOST_BACKEND_BLK = 0,
	VHOST_BACKEND_SCSI,
};
247 
/* Transport-agnostic per-device-type operations. */
struct spdk_vhost_dev_backend {
	enum vhost_backend_type type;

	/* Read/write the virtio device config space. */
	int (*vhost_get_config)(struct spdk_vhost_dev *vdev, uint8_t *config, uint32_t len);
	int (*vhost_set_config)(struct spdk_vhost_dev *vdev, uint8_t *config,
				uint32_t offset, uint32_t size, uint32_t flags);

	/* Dump device state / configuration into JSON. */
	void (*dump_info_json)(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
	void (*write_config_json)(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
	int (*remove_device)(struct spdk_vhost_dev *vdev);
	/* Set/get interrupt coalescing parameters. */
	int (*set_coalescing)(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
			      uint32_t iops_threshold);
	void (*get_coalescing)(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
			       uint32_t *iops_threshold);
};
263 
/* Translate a guest physical address range of \c len bytes to a host
 * virtual address within \c vsession's memory map.
 */
void *vhost_gpa_to_vva(struct spdk_vhost_session *vsession, uint64_t addr, uint64_t len);

/* Fetch request head indexes from the split ring's available ring into
 * \c reqs (at most \c reqs_len entries).
 */
uint16_t vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *vq, uint16_t *reqs,
				 uint16_t reqs_len);

/**
 * Get a virtio split descriptor at given index in given virtqueue.
 * The descriptor will provide access to the entire descriptor
 * chain. The subsequent descriptors are accessible via
 * \c vhost_vring_desc_get_next.
 * \param vsession vhost session
 * \param vq virtqueue
 * \param req_idx descriptor index
 * \param desc pointer to be set to the descriptor
 * \param desc_table descriptor table to be used with
 * \c vhost_vring_desc_get_next. This might be either
 * default virtqueue descriptor table or per-chain indirect
 * table.
 * \param desc_table_size size of the *desc_table*
 * \return 0 on success, -1 if given index is invalid.
 * If -1 is returned, the content of params is undefined.
 */
int vhost_vq_get_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *vq,
		      uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table,
		      uint32_t *desc_table_size);
289 
/**
 * Get a virtio packed descriptor at given index in given virtqueue.
 * The descriptor will provide access to the entire descriptor
 * chain. The subsequent descriptors are accessible via
 * \c vhost_vring_packed_desc_get_next.
 * \param vsession vhost session
 * \param vq virtqueue
 * \param req_idx descriptor index
 * \param desc pointer to be set to the descriptor
 * \param desc_table descriptor table to be used with
 * \c vhost_vring_packed_desc_get_next. This might be either
 * \c NULL or per-chain indirect table.
 * \param desc_table_size size of the *desc_table*
 * \return 0 on success, -1 if given index is invalid.
 * If -1 is returned, the content of params is undefined.
 */
int vhost_vq_get_desc_packed(struct spdk_vhost_session *vsession,
			     struct spdk_vhost_virtqueue *virtqueue,
			     uint16_t req_idx, struct vring_packed_desc **desc,
			     struct vring_packed_desc **desc_table, uint32_t *desc_table_size);

/* Get a descriptor at \c req_idx from the inflight descriptor array
 * (see spdk_vhost_inflight_desc), analogous to vhost_vq_get_desc_packed().
 */
int vhost_inflight_queue_get_desc(struct spdk_vhost_session *vsession,
				  spdk_vhost_inflight_desc *desc_array,
				  uint16_t req_idx, spdk_vhost_inflight_desc **desc,
				  struct vring_packed_desc  **desc_table, uint32_t *desc_table_size);
315 
/**
 * Send IRQ/call client (if pending) for \c vq.
 * \param vsession vhost session
 * \param vq virtqueue
 * \return
 *   0 - if no interrupt was signalled
 *   1 - if interrupt was signalled
 */
int vhost_vq_used_signal(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *vq);

/**
 * Send IRQs for the queue that need to be signaled.
 * \param vq virtqueue
 */
void vhost_session_vq_used_signal(struct spdk_vhost_virtqueue *virtqueue);

/**
 * Enqueue a completed request to the split ring's used ring.
 * \param vsession vhost session
 * \param vq virtqueue
 * \param id descriptor id to place in the used ring
 * \param len number of bytes written by the device
 */
void vhost_vq_used_ring_enqueue(struct spdk_vhost_session *vsession,
				struct spdk_vhost_virtqueue *vq,
				uint16_t id, uint32_t len);
335 
/**
 * Enqueue the entry to the used ring when device complete the request.
 * \param vsession vhost session
 * \param virtqueue virtqueue
 * \param num_descs descriptor count. It's the count of the number of buffers in the chain.
 * \param buffer_id descriptor buffer ID.
 * \param length device write length. Specify the length of the buffer that has been initialized
 * (written to) by the device
 * \param inflight_head the head idx of this IO inflight desc chain.
 */
void vhost_vq_packed_ring_enqueue(struct spdk_vhost_session *vsession,
				  struct spdk_vhost_virtqueue *virtqueue,
				  uint16_t num_descs, uint16_t buffer_id,
				  uint32_t length, uint16_t inflight_head);
351 
/**
 * Get the subsequent descriptor from the given table.
 * \param desc current descriptor, will be set to the
 * next descriptor (NULL in case this is the last
 * descriptor in the chain or the next desc is invalid)
 * \param desc_table descriptor table
 * \param desc_table_size size of the *desc_table*
 * \return 0 on success, -1 if given index is invalid.
 * The *desc* param will be set regardless of the
 * return value.
 */
int vhost_vring_desc_get_next(struct vring_desc **desc,
			      struct vring_desc *desc_table, uint32_t desc_table_size);
/* Return true if the split-ring descriptor is device-writable,
 * i.e. VRING_DESC_F_WRITE is set in its flags.
 */
static inline bool
vhost_vring_desc_is_wr(struct vring_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_WRITE) != 0;
}
370 
/* Append the buffer described by the split-ring descriptor \c desc to
 * \c iov at \c *iov_index, advancing \c *iov_index.
 */
int vhost_vring_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
			    uint16_t *iov_index, const struct vring_desc *desc);

/* Check whether the packed ring has an available descriptor chain. */
bool vhost_vq_packed_ring_is_avail(struct spdk_vhost_virtqueue *virtqueue);

/**
 * Get subsequent descriptor from vq or desc table.
 * \param desc current descriptor, will be set to the
 * next descriptor (NULL in case this is the last
 * descriptor in the chain or the next desc is invalid)
 * \param req_idx index of current desc, will be set to the next
 * index; interpreted against \c desc_table when it is non-NULL,
 * otherwise against the vring (NOTE(review): original comment was
 * ambiguous about the direction — confirm against the implementation).
 * \param vq virtqueue
 * \param desc_table descriptor table
 * \param desc_table_size size of the *desc_table*
 * \return 0 on success, -1 if given index is invalid
 * The *desc* param will be set regardless of the
 * return value.
 */
int vhost_vring_packed_desc_get_next(struct vring_packed_desc **desc, uint16_t *req_idx,
				     struct spdk_vhost_virtqueue *vq,
				     struct vring_packed_desc *desc_table,
				     uint32_t desc_table_size);

/* Check whether the packed-ring descriptor is device-writable. */
bool vhost_vring_packed_desc_is_wr(struct vring_packed_desc *cur_desc);

int vhost_vring_packed_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
				   uint16_t *iov_index, const struct vring_packed_desc *desc);

/* Inflight-descriptor variants of the is_wr/to_iov helpers above. */
bool vhost_vring_inflight_desc_is_wr(spdk_vhost_inflight_desc *cur_desc);

int vhost_vring_inflight_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
				     uint16_t *iov_index, const spdk_vhost_inflight_desc *desc);

/* Get the buffer ID of the packed descriptor chain starting at \c req_idx;
 * \c *num_descs is set to the number of descriptors in the chain.
 */
uint16_t vhost_vring_packed_desc_get_buffer_id(struct spdk_vhost_virtqueue *vq, uint16_t req_idx,
		uint16_t *num_descs);
407 
408 static inline bool
409 __attribute__((always_inline))
410 vhost_dev_has_feature(struct spdk_vhost_session *vsession, unsigned feature_id)
411 {
412 	return vsession->negotiated_features & (1ULL << feature_id);
413 }
414 
/* Start a previously created vhost-scsi controller by name. */
int vhost_scsi_controller_start(const char *name);

/* Register a vhost device under the given name and CPU mask. */
int vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
		       const struct spdk_json_val *params, const struct spdk_vhost_dev_backend *backend,
		       const struct spdk_vhost_user_dev_backend *user_backend, bool delay);

int vhost_dev_unregister(struct spdk_vhost_dev *vdev);

void vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);

/*
 * Set vhost session to run in interrupt or poll mode
 */
void vhost_user_session_set_interrupt_mode(struct spdk_vhost_session *vsession,
		bool interrupt_mode);

/*
 * Memory registration functions used in start/stop device callbacks
 */
void vhost_session_mem_register(struct rte_vhost_memory *mem);
void vhost_session_mem_unregister(struct rte_vhost_memory *mem);

/*
 * Call a function for each session of the provided vhost device.
 * The function will be called one-by-one on each session's thread.
 *
 * \param vdev vhost device
 * \param fn function to call on each session's thread
 * \param cpl_fn function to be called at the end of the iteration on
 * the vhost management thread.
 * Optional, can be NULL.
 * \param arg additional argument to the both callbacks
 */
void vhost_user_dev_foreach_session(struct spdk_vhost_dev *dev,
				    spdk_vhost_session_fn fn,
				    spdk_vhost_dev_fn cpl_fn,
				    void *arg);

/**
 * Finish a blocking vhost_user_wait_for_session_stop() call and finally
 * stop the session. This must be called on the session's lcore which
 * used to receive all session-related messages (e.g. from
 * vhost_user_dev_foreach_session()). After this call, the session-
 * related messages will be once again processed by any arbitrary thread.
 *
 * Must be called under the vhost user device's session access lock.
 *
 * \param vsession vhost session
 * \param response return code
 */
void vhost_user_session_stop_done(struct spdk_vhost_session *vsession, int response);

/* Look up an active session by its rte_vhost connection ID. */
struct spdk_vhost_session *vhost_session_find_by_vid(int vid);
void vhost_session_install_rte_compat_hooks(struct spdk_vhost_session *vsession);
/* Register a vhost-user unix domain socket with the given feature masks. */
int vhost_register_unix_socket(const char *path, const char *ctrl_name,
			       uint64_t virtio_features, uint64_t disabled_features, uint64_t protocol_features);
int vhost_driver_unregister(const char *path);
int vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);
int vhost_get_negotiated_features(int vid, uint64_t *negotiated_features);

int remove_vhost_controller(struct spdk_vhost_dev *vdev);

/* Get/put an I/O channel for a vhost-blk device. */
struct spdk_io_channel *vhost_blk_get_io_channel(struct spdk_vhost_dev *vdev);
void vhost_blk_put_io_channel(struct spdk_io_channel *ch);

/* The spdk_bdev pointer should only be used to retrieve
 * the device properties, ex. number of blocks or I/O type supported. */
struct spdk_bdev *vhost_blk_get_bdev(struct spdk_vhost_dev *vdev);

/* Function calls from vhost.c to rte_vhost_user.c,
 * shall be removed once virtio transport abstraction is complete. */
int vhost_user_session_set_coalescing(struct spdk_vhost_dev *dev,
				      struct spdk_vhost_session *vsession, void *ctx);
int vhost_user_dev_set_coalescing(struct spdk_vhost_user_dev *user_dev, uint32_t delay_base_us,
				  uint32_t iops_threshold);
/* Create the vhost-user transport state for \c vdev.
 * NOTE: fixed parameter name typo 'dealy' -> 'delay' to match the
 * corresponding 'delay' flag of vhost_dev_register().
 */
int vhost_user_dev_create(struct spdk_vhost_dev *vdev, const char *name,
			  struct spdk_cpuset *cpumask,
			  const struct spdk_vhost_user_dev_backend *user_backend, bool delay);
int vhost_user_dev_init(struct spdk_vhost_dev *vdev, const char *name,
			struct spdk_cpuset *cpumask, const struct spdk_vhost_user_dev_backend *user_backend);
int vhost_user_dev_start(struct spdk_vhost_dev *vdev);
int vhost_user_dev_unregister(struct spdk_vhost_dev *vdev);
/* Global vhost-user transport init/teardown. */
int vhost_user_init(void);
void vhost_user_fini(spdk_vhost_fini_cb vhost_cb);
/* Device-level coalescing setter/getter for the vhost-user transport. */
int vhost_user_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
			      uint32_t iops_threshold);
void vhost_user_get_coalescing(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
			       uint32_t *iops_threshold);
503 
/* Create/destroy a virtio-blk controller. */
int virtio_blk_construct_ctrlr(struct spdk_vhost_dev *vdev, const char *address,
			       struct spdk_cpuset *cpumask, const struct spdk_json_val *params,
			       const struct spdk_vhost_user_dev_backend *user_backend);
int virtio_blk_destroy_ctrlr(struct spdk_vhost_dev *vdev);

struct spdk_vhost_blk_task;

/* Completion callback invoked with the request status byte once a
 * virtio-blk request finishes (see virtio_blk_process_request()).
 */
typedef void (*virtio_blk_request_cb)(uint8_t status, struct spdk_vhost_blk_task *task,
				      void *cb_arg);
513 
/* State of a single virtio-blk request. */
struct spdk_vhost_blk_task {
	struct spdk_bdev_io *bdev_io;
	/* Completion callback and its argument. */
	virtio_blk_request_cb cb;
	void *cb_arg;

	/* Request status byte, written on completion. */
	volatile uint8_t *status;

	/* for io wait */
	struct spdk_bdev_io_wait_entry bdev_io_wait;
	struct spdk_io_channel *bdev_io_wait_ch;
	struct spdk_vhost_dev *bdev_io_wait_vdev;

	/** Number of bytes that were written. */
	uint32_t used_len;
	/* Number of valid entries in 'iovs'. */
	uint16_t iovcnt;
	struct iovec iovs[SPDK_VHOST_IOVS_MAX];

	/** Size of whole payload in bytes */
	uint32_t payload_size;
};
534 
/* Submit a virtio-blk request for processing; \c cb is invoked on completion. */
int virtio_blk_process_request(struct spdk_vhost_dev *vdev, struct spdk_io_channel *ch,
			       struct spdk_vhost_blk_task *task, virtio_blk_request_cb cb, void *cb_arg);

/* Completion callback for transport bdev_event handling. */
typedef void (*bdev_event_cb_complete)(struct spdk_vhost_dev *vdev, void *ctx);

/* Maximum length of a virtio-blk transport name. */
#define SPDK_VIRTIO_BLK_TRSTRING_MAX_LEN 32
541 
/* Operations implemented by each virtio-blk transport
 * (see SPDK_VIRTIO_BLK_TRANSPORT_REGISTER()).
 */
struct spdk_virtio_blk_transport_ops {
	/**
	 * Transport name
	 */
	char name[SPDK_VIRTIO_BLK_TRSTRING_MAX_LEN];

	/**
	 * Create a transport for the given transport opts
	 */
	struct spdk_virtio_blk_transport *(*create)(const struct spdk_json_val *params);

	/**
	 * Dump transport-specific opts into JSON
	 */
	void (*dump_opts)(struct spdk_virtio_blk_transport *transport, struct spdk_json_write_ctx *w);

	/**
	 * Destroy the transport
	 */
	int (*destroy)(struct spdk_virtio_blk_transport *transport,
		       spdk_vhost_fini_cb cb_fn);

	/**
	 * Create vhost block controller
	 */
	int (*create_ctrlr)(struct spdk_vhost_dev *vdev, struct spdk_cpuset *cpumask,
			    const char *address, const struct spdk_json_val *params,
			    void *custom_opts);

	/**
	 * Destroy vhost block controller
	 */
	int (*destroy_ctrlr)(struct spdk_vhost_dev *vdev);

	/*
	 * Signal removal of the bdev.
	 */
	void (*bdev_event)(enum spdk_bdev_event_type type, struct spdk_vhost_dev *vdev,
			   bdev_event_cb_complete cb, void *cb_arg);

	/**
	 * Set coalescing parameters.
	 */
	int (*set_coalescing)(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
			      uint32_t iops_threshold);

	/**
	 * Get coalescing parameters.
	 */
	void (*get_coalescing)(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
			       uint32_t *iops_threshold);
};
594 
/* A created virtio-blk transport instance. */
struct spdk_virtio_blk_transport {
	const struct spdk_virtio_blk_transport_ops	*ops;
	TAILQ_ENTRY(spdk_virtio_blk_transport)		tailq;
};
599 
/* Node in the list of registered transport ops (by-value copy of 'ops'). */
struct virtio_blk_transport_ops_list_element {
	struct spdk_virtio_blk_transport_ops			ops;
	TAILQ_ENTRY(virtio_blk_transport_ops_list_element)	link;
};
604 
/* Register a virtio-blk transport implementation
 * (see SPDK_VIRTIO_BLK_TRANSPORT_REGISTER()).
 */
void virtio_blk_transport_register(const struct spdk_virtio_blk_transport_ops *ops);
/* Create/destroy a transport instance by name. */
int virtio_blk_transport_create(const char *transport_name, const struct spdk_json_val *params);
int virtio_blk_transport_destroy(struct spdk_virtio_blk_transport *transport,
				 spdk_vhost_fini_cb cb_fn);
/* Iterate over created transport instances. */
struct spdk_virtio_blk_transport *virtio_blk_transport_get_first(void);
struct spdk_virtio_blk_transport *virtio_blk_transport_get_next(
	struct spdk_virtio_blk_transport *transport);
void virtio_blk_transport_dump_opts(struct spdk_virtio_blk_transport *transport,
				    struct spdk_json_write_ctx *w);
/* Look up a transport instance (or its ops) by name. */
struct spdk_virtio_blk_transport *virtio_blk_tgt_get_transport(const char *transport_name);
const struct spdk_virtio_blk_transport_ops *virtio_blk_get_transport_ops(
	const char *transport_name);

void vhost_session_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
619 
/*
 * Macro used to register new transports.
 * Expands to a constructor function that registers \c transport_ops
 * automatically at program startup.
 */
#define SPDK_VIRTIO_BLK_TRANSPORT_REGISTER(name, transport_ops) \
static void __attribute__((constructor)) _virtio_blk_transport_register_##name(void) \
{ \
	virtio_blk_transport_register(transport_ops); \
}
628 
629 #endif /* SPDK_VHOST_INTERNAL_H */
630