xref: /spdk/lib/vhost/vhost_internal.h (revision 510f4c134a21b45ff3a5add9ebc6c6cf7e49aeab)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #ifndef SPDK_VHOST_INTERNAL_H
7 #define SPDK_VHOST_INTERNAL_H
8 #include <linux/virtio_config.h>
9 
10 #include "spdk/stdinc.h"
11 
12 #include <rte_vhost.h>
13 
14 #include "spdk_internal/vhost_user.h"
15 #include "spdk/bdev.h"
16 #include "spdk/log.h"
17 #include "spdk/util.h"
18 #include "spdk/rpc.h"
19 #include "spdk/config.h"
20 
/* Maximum number of virtqueues per vhost device. */
#define SPDK_VHOST_MAX_VQUEUES	256
/* Maximum number of entries in a single virtqueue. */
#define SPDK_VHOST_MAX_VQ_SIZE	1024

/* Maximum number of devices attached to one vhost-scsi controller. */
#define SPDK_VHOST_SCSI_CTRLR_MAX_DEVS 8

/* Maximum iovec entries used for a single request's descriptor chain.
 * NOTE(review): presumably 128 data buffers + 1 header -- confirm in backends. */
#define SPDK_VHOST_IOVS_MAX 129

/* NOTE(review): presumably caps requests processed per virtqueue in one
 * poll iteration -- confirm in the backend pollers. */
#define SPDK_VHOST_VQ_MAX_SUBMISSIONS	32

/*
 * Rate at which stats are checked for interrupt coalescing.
 */
#define SPDK_VHOST_STATS_CHECK_INTERVAL_MS 10
/*
 * Default threshold at which interrupts start to be coalesced.
 */
#define SPDK_VHOST_VQ_IOPS_COALESCING_THRESHOLD 60000

/*
 * Currently coalescing is not used by default.
 * Setting this to value > 0 here or by RPC will enable coalescing.
 */
#define SPDK_VHOST_COALESCING_DELAY_BASE_US 0

/* Default virtio feature bits offered by SPDK vhost devices. */
#define SPDK_VHOST_FEATURES ((1ULL << VHOST_F_LOG_ALL) | \
	(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
	(1ULL << VIRTIO_F_VERSION_1) | \
	(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \
	(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
	(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
	(1ULL << VIRTIO_F_RING_PACKED) | \
	(1ULL << VIRTIO_F_ANY_LAYOUT))

/* Feature bits disabled by default.
 * NOTE(review): presumably subtracted from SPDK_VHOST_FEATURES at
 * registration time -- confirm against vhost_dev_register(). */
#define SPDK_VHOST_DISABLED_FEATURES ((1ULL << VIRTIO_RING_F_EVENT_IDX) | \
	(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY))

/* Packed-ring descriptor AVAIL/USED flags as 64-bit masks. */
#define VRING_DESC_F_AVAIL	(1ULL << VRING_PACKED_DESC_F_AVAIL)
#define VRING_DESC_F_USED	(1ULL << VRING_PACKED_DESC_F_USED)
#define VRING_DESC_F_AVAIL_USED	(VRING_DESC_F_AVAIL | VRING_DESC_F_USED)

/* Shorthand aliases for the rte_vhost inflight/resubmit types. */
typedef struct rte_vhost_resubmit_desc spdk_vhost_resubmit_desc;
typedef struct rte_vhost_resubmit_info spdk_vhost_resubmit_info;
typedef struct rte_vhost_inflight_desc_packed	spdk_vhost_inflight_desc;
64 
65 struct spdk_vhost_virtqueue {
66 	struct rte_vhost_vring vring;
67 	struct rte_vhost_ring_inflight vring_inflight;
68 	uint16_t last_avail_idx;
69 	uint16_t last_used_idx;
70 
71 	struct {
72 		/* To mark a descriptor as available in packed ring
73 		 * Equal to avail_wrap_counter in spec.
74 		 */
75 		uint8_t avail_phase	: 1;
76 		/* To mark a descriptor as used in packed ring
77 		 * Equal to used_wrap_counter in spec.
78 		 */
79 		uint8_t used_phase	: 1;
80 		uint8_t padding		: 5;
81 		bool packed_ring	: 1;
82 	} packed;
83 
84 	void *tasks;
85 
86 	/* Request count from last stats check */
87 	uint32_t req_cnt;
88 
89 	/* Request count from last event */
90 	uint16_t used_req_cnt;
91 
92 	/* How long interrupt is delayed */
93 	uint32_t irq_delay_time;
94 
95 	/* Next time when we need to send event */
96 	uint64_t next_event_time;
97 
98 	/* Associated vhost_virtqueue in the virtio device's virtqueue list */
99 	uint32_t vring_idx;
100 
101 	struct spdk_vhost_session *vsession;
102 
103 	struct spdk_interrupt *intr;
104 } __attribute((aligned(SPDK_CACHE_LINE_SIZE)));
105 
/* State of a single vhost-user connection (one guest) to a device. */
struct spdk_vhost_session {
	/* Device this session is connected to. */
	struct spdk_vhost_dev *vdev;

	/* rte_vhost connection ID. */
	int vid;

	/* Unique session ID. */
	uint64_t id;
	/* Unique session name. */
	char *name;

	bool initialized;
	bool started;
	bool needs_restart;
	bool forced_polling;
	/* True while the session runs in interrupt mode rather than
	 * poll mode (see vhost_user_session_set_interrupt_mode()). */
	bool interrupt_mode;

	/* Guest memory table for this connection. */
	struct rte_vhost_memory *mem;

	/* NOTE(review): presumably the number of in-flight tasks for this
	 * session -- confirm against the backends. */
	int task_cnt;

	uint16_t max_queues;

	/* Features negotiated with the guest driver;
	 * queried via vhost_dev_has_feature(). */
	uint64_t negotiated_features;

	/* Local copy of device coalescing settings. */
	uint32_t coalescing_delay_time_base;
	uint32_t coalescing_io_rate_threshold;

	/* Next time when stats for event coalescing will be checked. */
	uint64_t next_stats_check_time;

	/* Interval used for event coalescing checking. */
	uint64_t stats_check_interval;

	/* Session's stop poller will only try limited times to destroy the session. */
	uint32_t stop_retry_count;

	struct spdk_vhost_virtqueue virtqueue[SPDK_VHOST_MAX_VQUEUES];

	/* Link in the owning spdk_vhost_user_dev's vsessions list. */
	TAILQ_ENTRY(spdk_vhost_session) tailq;
};
148 
/* vhost-user transport-specific state extending a generic spdk_vhost_dev. */
struct spdk_vhost_user_dev {
	/* The generic device this transport state belongs to. */
	struct spdk_vhost_dev *vdev;

	/* Per-device-type session start/stop callbacks. */
	const struct spdk_vhost_user_dev_backend *user_backend;

	/* Saved original values used to setup coalescing to avoid integer
	 * rounding issues during save/load config.
	 */
	uint32_t coalescing_delay_us;
	uint32_t coalescing_iops_threshold;

	/* Current connections to the device */
	TAILQ_HEAD(, spdk_vhost_session) vsessions;

	/* Increment-only session counter */
	uint64_t vsessions_num;

	/* Number of started and actively polled sessions */
	uint32_t active_session_num;

	/* Number of pending asynchronous operations */
	uint32_t pending_async_op_num;
};
172 
/* A vhost device (controller), independent of the transport used. */
struct spdk_vhost_dev {
	/* Controller name. */
	char *name;
	/* Socket path, as passed to vhost_register_unix_socket(). */
	char *path;

	/* Thread the device was registered on. */
	struct spdk_thread *thread;
	bool registered;

	/* Feature sets passed through to vhost_register_unix_socket(). */
	uint64_t virtio_features;
	uint64_t disabled_features;
	uint64_t protocol_features;
	bool packed_ring_recovery;

	/* Device-type specific operations. */
	const struct spdk_vhost_dev_backend *backend;

	/* Context passed from transport */
	void *ctxt;

	/* Link in the device list. */
	TAILQ_ENTRY(spdk_vhost_dev) tailq;
};
192 
/**
 * Per-session callback, invoked on the session's thread by
 * vhost_user_dev_foreach_session().
 *
 * \param vdev vhost device.
 * \param vsession vhost session.
 * \param arg user-provided parameter.
 *
 * \return negative values will break the foreach call, meaning
 * the function won't be called again. Return codes zero and
 * positive don't have any effect.
 */
typedef int (*spdk_vhost_session_fn)(struct spdk_vhost_dev *vdev,
				     struct spdk_vhost_session *vsession,
				     void *arg);

/**
 * Per-device completion callback.
 *
 * \param vdev vhost device.
 * \param arg user-provided parameter.
 */
typedef void (*spdk_vhost_dev_fn)(struct spdk_vhost_dev *vdev, void *arg);
211 
/* Callbacks a device type provides to the vhost-user transport. */
struct spdk_vhost_user_dev_backend {
	/**
	 * Size of additional per-session context data
	 * allocated whenever a new client connects.
	 */
	size_t session_ctx_size;

	/** Start processing the newly connected session. */
	int (*start_session)(struct spdk_vhost_session *vsession);
	/** Stop processing the session before it is destroyed. */
	int (*stop_session)(struct spdk_vhost_session *vsession);
};
222 
/* Type of the vhost device backend. */
enum vhost_backend_type {
	VHOST_BACKEND_BLK = 0,
	VHOST_BACKEND_SCSI,
};

/* Device-type specific operations, common to all transports. */
struct spdk_vhost_dev_backend {
	enum vhost_backend_type type;

	/* Read up to *len* bytes of virtio device config into *config*. */
	int (*vhost_get_config)(struct spdk_vhost_dev *vdev, uint8_t *config, uint32_t len);
	/* Write *size* bytes at *offset* of the virtio device config. */
	int (*vhost_set_config)(struct spdk_vhost_dev *vdev, uint8_t *config,
				uint32_t offset, uint32_t size, uint32_t flags);

	/* Dump device-specific info as JSON. */
	void (*dump_info_json)(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
	/* Write the JSON config needed to recreate this device. */
	void (*write_config_json)(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
	/* Remove the device. */
	int (*remove_device)(struct spdk_vhost_dev *vdev);
};
239 
240 void *vhost_gpa_to_vva(struct spdk_vhost_session *vsession, uint64_t addr, uint64_t len);
241 
242 uint16_t vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *vq, uint16_t *reqs,
243 				 uint16_t reqs_len);
244 
245 /**
246  * Get a virtio split descriptor at given index in given virtqueue.
247  * The descriptor will provide access to the entire descriptor
248  * chain. The subsequent descriptors are accessible via
249  * \c spdk_vhost_vring_desc_get_next.
250  * \param vsession vhost session
251  * \param vq virtqueue
252  * \param req_idx descriptor index
253  * \param desc pointer to be set to the descriptor
254  * \param desc_table descriptor table to be used with
255  * \c spdk_vhost_vring_desc_get_next. This might be either
256  * default virtqueue descriptor table or per-chain indirect
257  * table.
258  * \param desc_table_size size of the *desc_table*
259  * \return 0 on success, -1 if given index is invalid.
260  * If -1 is returned, the content of params is undefined.
261  */
262 int vhost_vq_get_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *vq,
263 		      uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table,
264 		      uint32_t *desc_table_size);
265 
266 /**
267  * Get a virtio packed descriptor at given index in given virtqueue.
268  * The descriptor will provide access to the entire descriptor
269  * chain. The subsequent descriptors are accessible via
270  * \c vhost_vring_packed_desc_get_next.
271  * \param vsession vhost session
272  * \param vq virtqueue
273  * \param req_idx descriptor index
274  * \param desc pointer to be set to the descriptor
275  * \param desc_table descriptor table to be used with
276  * \c spdk_vhost_vring_desc_get_next. This might be either
277  * \c NULL or per-chain indirect table.
278  * \param desc_table_size size of the *desc_table*
279  * \return 0 on success, -1 if given index is invalid.
280  * If -1 is returned, the content of params is undefined.
281  */
282 int vhost_vq_get_desc_packed(struct spdk_vhost_session *vsession,
283 			     struct spdk_vhost_virtqueue *virtqueue,
284 			     uint16_t req_idx, struct vring_packed_desc **desc,
285 			     struct vring_packed_desc **desc_table, uint32_t *desc_table_size);
286 
287 int vhost_inflight_queue_get_desc(struct spdk_vhost_session *vsession,
288 				  spdk_vhost_inflight_desc *desc_array,
289 				  uint16_t req_idx, spdk_vhost_inflight_desc **desc,
290 				  struct vring_packed_desc  **desc_table, uint32_t *desc_table_size);
291 
292 /**
293  * Send IRQ/call client (if pending) for \c vq.
294  * \param vsession vhost session
295  * \param vq virtqueue
296  * \return
297  *   0 - if no interrupt was signalled
298  *   1 - if interrupt was signalled
299  */
300 int vhost_vq_used_signal(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *vq);
301 
302 
303 /**
304  * Send IRQs for all queues that need to be signaled.
 * \param vsession vhost session
307  */
308 void vhost_session_used_signal(struct spdk_vhost_session *vsession);
309 
310 /**
311  * Send IRQs for the queue that need to be signaled.
312  * \param vq virtqueue
313  */
314 void vhost_session_vq_used_signal(struct spdk_vhost_virtqueue *virtqueue);
315 
316 void vhost_vq_used_ring_enqueue(struct spdk_vhost_session *vsession,
317 				struct spdk_vhost_virtqueue *vq,
318 				uint16_t id, uint32_t len);
319 
320 /**
321  * Enqueue the entry to the used ring when device complete the request.
 * \param vsession vhost session
 * \param virtqueue virtqueue
 * \param num_descs descriptor count, i.e. the number of buffers in the chain.
 * \param buffer_id descriptor buffer ID.
 * \param length device write length. Specify the length of the buffer that has been
 * initialized (written to) by the device.
 * \param inflight_head the head index of this I/O's inflight descriptor chain.
330  */
331 void vhost_vq_packed_ring_enqueue(struct spdk_vhost_session *vsession,
332 				  struct spdk_vhost_virtqueue *virtqueue,
333 				  uint16_t num_descs, uint16_t buffer_id,
334 				  uint32_t length, uint16_t inflight_head);
335 
336 /**
337  * Get subsequent descriptor from given table.
338  * \param desc current descriptor, will be set to the
339  * next descriptor (NULL in case this is the last
340  * descriptor in the chain or the next desc is invalid)
341  * \param desc_table descriptor table
342  * \param desc_table_size size of the *desc_table*
343  * \return 0 on success, -1 if given index is invalid
344  * The *desc* param will be set regardless of the
345  * return value.
346  */
347 int vhost_vring_desc_get_next(struct vring_desc **desc,
348 			      struct vring_desc *desc_table, uint32_t desc_table_size);
/* Return true when the descriptor's buffer is device-writable,
 * i.e. VRING_DESC_F_WRITE is set in its flags. */
static inline bool
vhost_vring_desc_is_wr(struct vring_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_WRITE) != 0;
}
354 
355 int vhost_vring_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
356 			    uint16_t *iov_index, const struct vring_desc *desc);
357 
358 bool vhost_vq_packed_ring_is_avail(struct spdk_vhost_virtqueue *virtqueue);
359 
360 /**
361  * Get subsequent descriptor from vq or desc table.
362  * \param desc current descriptor, will be set to the
363  * next descriptor (NULL in case this is the last
364  * descriptor in the chain or the next desc is invalid)
 * \param req_idx index of current desc, will be set to the next
 * index. If desc_table != NULL the req_idx is the vring index
367  * or the req_idx is the desc_table index.
368  * \param desc_table descriptor table
369  * \param desc_table_size size of the *desc_table*
370  * \return 0 on success, -1 if given index is invalid
371  * The *desc* param will be set regardless of the
372  * return value.
373  */
374 int vhost_vring_packed_desc_get_next(struct vring_packed_desc **desc, uint16_t *req_idx,
375 				     struct spdk_vhost_virtqueue *vq,
376 				     struct vring_packed_desc *desc_table,
377 				     uint32_t desc_table_size);
378 
379 bool vhost_vring_packed_desc_is_wr(struct vring_packed_desc *cur_desc);
380 
381 int vhost_vring_packed_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
382 				   uint16_t *iov_index, const struct vring_packed_desc *desc);
383 
384 bool vhost_vring_inflight_desc_is_wr(spdk_vhost_inflight_desc *cur_desc);
385 
386 int vhost_vring_inflight_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
387 				     uint16_t *iov_index, const spdk_vhost_inflight_desc *desc);
388 
389 uint16_t vhost_vring_packed_desc_get_buffer_id(struct spdk_vhost_virtqueue *vq, uint16_t req_idx,
390 		uint16_t *num_descs);
391 
392 static inline bool
393 __attribute__((always_inline))
394 vhost_dev_has_feature(struct spdk_vhost_session *vsession, unsigned feature_id)
395 {
396 	return vsession->negotiated_features & (1ULL << feature_id);
397 }
398 
399 int vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
400 		       const struct spdk_json_val *params,
401 		       const struct spdk_vhost_dev_backend *backend,
402 		       const struct spdk_vhost_user_dev_backend *user_backend);
403 int vhost_dev_unregister(struct spdk_vhost_dev *vdev);
404 
405 void vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
406 
407 /*
408  * Set vhost session to run in interrupt or poll mode
409  */
410 void vhost_user_session_set_interrupt_mode(struct spdk_vhost_session *vsession,
411 		bool interrupt_mode);
412 
413 /*
414  * Memory registration functions used in start/stop device callbacks
415  */
416 void vhost_session_mem_register(struct rte_vhost_memory *mem);
417 void vhost_session_mem_unregister(struct rte_vhost_memory *mem);
418 
419 /*
420  * Call a function for each session of the provided vhost device.
421  * The function will be called one-by-one on each session's thread.
422  *
423  * \param vdev vhost device
424  * \param fn function to call on each session's thread
425  * \param cpl_fn function to be called at the end of the iteration on
426  * the vhost management thread.
427  * Optional, can be NULL.
428  * \param arg additional argument to the both callbacks
429  */
430 void vhost_user_dev_foreach_session(struct spdk_vhost_dev *dev,
431 				    spdk_vhost_session_fn fn,
432 				    spdk_vhost_dev_fn cpl_fn,
433 				    void *arg);
434 
435 /**
436  * Call a function on the provided lcore and block until either
437  * vhost_user_session_start_done() or vhost_user_session_stop_done()
438  * is called.
439  *
440  * This must be called under the global vhost mutex, which this function
441  * will unlock for the time it's waiting. It's meant to be called only
442  * from start/stop session callbacks.
443  *
444  * \param vsession vhost session
445  * \param cb_fn the function to call. The void *arg parameter in cb_fn
446  * is always NULL.
447  * \param timeout_sec timeout in seconds. This function will still
448  * block after the timeout expires, but will print the provided errmsg.
449  * \param errmsg error message to print once the timeout expires
450  * \return return the code passed to spdk_vhost_session_event_done().
451  */
452 int vhost_user_session_send_event(struct spdk_vhost_session *vsession,
453 				  spdk_vhost_session_fn cb_fn, unsigned timeout_sec,
454 				  const char *errmsg);
455 
456 /**
457  * Finish a blocking spdk_vhost_user_session_send_event() call and finally
458  * start the session. This must be called on the target lcore, which
459  * will now receive all session-related messages (e.g. from
460  * vhost_user_dev_foreach_session()).
461  *
462  * Must be called under the global vhost lock.
463  *
464  * \param vsession vhost session
465  * \param response return code
466  */
467 void vhost_user_session_start_done(struct spdk_vhost_session *vsession, int response);
468 
469 /**
470  * Finish a blocking spdk_vhost_user_session_send_event() call and finally
471  * stop the session. This must be called on the session's lcore which
472  * used to receive all session-related messages (e.g. from
473  * vhost_user_dev_foreach_session()). After this call, the session-
474  * related messages will be once again processed by any arbitrary thread.
475  *
 * Must be called under the global vhost lock.
 *
480  * \param vsession vhost session
481  * \param response return code
482  */
483 void vhost_user_session_stop_done(struct spdk_vhost_session *vsession, int response);
484 
485 struct spdk_vhost_session *vhost_session_find_by_vid(int vid);
486 void vhost_session_install_rte_compat_hooks(struct spdk_vhost_session *vsession);
487 int vhost_register_unix_socket(const char *path, const char *ctrl_name,
488 			       uint64_t virtio_features, uint64_t disabled_features, uint64_t protocol_features);
489 int vhost_driver_unregister(const char *path);
490 int vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);
491 int vhost_get_negotiated_features(int vid, uint64_t *negotiated_features);
492 
493 int remove_vhost_controller(struct spdk_vhost_dev *vdev);
494 
495 struct spdk_io_channel *vhost_blk_get_io_channel(struct spdk_vhost_dev *vdev);
496 void vhost_blk_put_io_channel(struct spdk_io_channel *ch);
497 
498 /* The spdk_bdev pointer should only be used to retrieve
499  * the device properties, ex. number of blocks or I/O type supported. */
500 struct spdk_bdev *vhost_blk_get_bdev(struct spdk_vhost_dev *vdev);
501 
/* Function calls from vhost.c to rte_vhost_user.c;
 * these shall be removed once the virtio transport abstraction is complete. */
504 int vhost_user_session_set_coalescing(struct spdk_vhost_dev *dev,
505 				      struct spdk_vhost_session *vsession, void *ctx);
506 int vhost_user_dev_set_coalescing(struct spdk_vhost_user_dev *user_dev, uint32_t delay_base_us,
507 				  uint32_t iops_threshold);
508 int vhost_user_dev_register(struct spdk_vhost_dev *vdev, const char *name,
509 			    struct spdk_cpuset *cpumask, const struct spdk_vhost_user_dev_backend *user_backend);
510 int vhost_user_dev_unregister(struct spdk_vhost_dev *vdev);
511 int vhost_user_init(void);
512 void vhost_user_fini(spdk_vhost_fini_cb vhost_cb);
513 
514 int virtio_blk_construct_ctrlr(struct spdk_vhost_dev *vdev, const char *address,
515 			       struct spdk_cpuset *cpumask, const struct spdk_json_val *params,
516 			       const struct spdk_vhost_user_dev_backend *user_backend);
517 int virtio_blk_destroy_ctrlr(struct spdk_vhost_dev *vdev);
518 
519 struct spdk_vhost_blk_task;
520 
521 typedef void (*virtio_blk_request_cb)(uint8_t status, struct spdk_vhost_blk_task *task,
522 				      void *cb_arg);
523 
/* Per-request state for a virtio-blk request being processed. */
struct spdk_vhost_blk_task {
	/* bdev I/O submitted for this request. */
	struct spdk_bdev_io *bdev_io;
	/* Completion callback and its argument. */
	virtio_blk_request_cb cb;
	void *cb_arg;

	/* Pointer to the request status byte. */
	volatile uint8_t *status;

	/* for io wait */
	struct spdk_bdev_io_wait_entry bdev_io_wait;
	struct spdk_io_channel *bdev_io_wait_ch;
	struct spdk_vhost_dev *bdev_io_wait_vdev;

	/** Number of bytes that were written. */
	uint32_t used_len;
	/* Number of valid entries in iovs[]. */
	uint16_t iovcnt;
	struct iovec iovs[SPDK_VHOST_IOVS_MAX];

	/** Size of whole payload in bytes */
	uint32_t payload_size;
};
544 
545 int virtio_blk_process_request(struct spdk_vhost_dev *vdev, struct spdk_io_channel *ch,
546 			       struct spdk_vhost_blk_task *task, virtio_blk_request_cb cb, void *cb_arg);
547 
548 typedef void (*bdev_event_cb_complete)(struct spdk_vhost_dev *vdev, void *ctx);
549 
550 #define SPDK_VIRTIO_BLK_TRSTRING_MAX_LEN 32
551 
/* Operations implemented by a virtio-blk transport; registered via
 * SPDK_VIRTIO_BLK_TRANSPORT_REGISTER(). */
struct spdk_virtio_blk_transport_ops {
	/**
	 * Transport name
	 */
	char name[SPDK_VIRTIO_BLK_TRSTRING_MAX_LEN];

	/**
	 * Create a transport for the given transport opts
	 */
	struct spdk_virtio_blk_transport *(*create)(const struct spdk_json_val *params);

	/**
	 * Dump transport-specific opts into JSON
	 */
	void (*dump_opts)(struct spdk_virtio_blk_transport *transport, struct spdk_json_write_ctx *w);

	/**
	 * Destroy the transport
	 */
	int (*destroy)(struct spdk_virtio_blk_transport *transport,
		       spdk_vhost_fini_cb cb_fn);

	/**
	 * Create vhost block controller listening at the given address
	 */
	int (*create_ctrlr)(struct spdk_vhost_dev *vdev, struct spdk_cpuset *cpumask,
			    const char *address, const struct spdk_json_val *params,
			    void *custom_opts);

	/**
	 * Destroy vhost block controller
	 */
	int (*destroy_ctrlr)(struct spdk_vhost_dev *vdev);

	/*
	 * Signal removal of the bdev.
	 */
	void (*bdev_event)(enum spdk_bdev_event_type type, struct spdk_vhost_dev *vdev,
			   bdev_event_cb_complete cb, void *cb_arg);
};
592 
/* An instantiated virtio-blk transport. */
struct spdk_virtio_blk_transport {
	const struct spdk_virtio_blk_transport_ops	*ops;
	/* Link in the list of created transports. */
	TAILQ_ENTRY(spdk_virtio_blk_transport)		tailq;
};

/* List node holding a copy of registered transport ops. */
struct virtio_blk_transport_ops_list_element {
	struct spdk_virtio_blk_transport_ops			ops;
	TAILQ_ENTRY(virtio_blk_transport_ops_list_element)	link;
};
602 
603 void virtio_blk_transport_register(const struct spdk_virtio_blk_transport_ops *ops);
604 int virtio_blk_transport_create(const char *transport_name, const struct spdk_json_val *params);
605 int virtio_blk_transport_destroy(struct spdk_virtio_blk_transport *transport,
606 				 spdk_vhost_fini_cb cb_fn);
607 
608 const struct spdk_virtio_blk_transport_ops *virtio_blk_get_transport_ops(
609 	const char *transport_name);
610 
611 
/*
 * Macro used to register new transports.
 * Expands to a constructor function that calls
 * virtio_blk_transport_register() with the given ops at program startup.
 */
#define SPDK_VIRTIO_BLK_TRANSPORT_REGISTER(name, transport_ops) \
static void __attribute__((constructor)) _virtio_blk_transport_register_##name(void) \
{ \
	virtio_blk_transport_register(transport_ops); \
}\
620 
621 #endif /* SPDK_VHOST_INTERNAL_H */
622