xref: /spdk/lib/vhost/vhost_internal.h (revision 307b8c112ffd90a26d53dd15fad67bd9038ef526)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #ifndef SPDK_VHOST_INTERNAL_H
7 #define SPDK_VHOST_INTERNAL_H
8 #include <linux/virtio_config.h>
9 
10 #include "spdk/stdinc.h"
11 
12 #include <rte_vhost.h>
13 
14 #include "spdk_internal/vhost_user.h"
15 #include "spdk/bdev.h"
16 #include "spdk/log.h"
17 #include "spdk/util.h"
18 #include "spdk/rpc.h"
19 #include "spdk/config.h"
20 
/* Upper bound on virtqueues per vhost session (sizes spdk_vhost_session::virtqueue). */
#define SPDK_VHOST_MAX_VQUEUES	256
/* Upper bound on the number of entries in a single virtqueue ring. */
#define SPDK_VHOST_MAX_VQ_SIZE	1024

/* Maximum number of devices per vhost-scsi controller. */
#define SPDK_VHOST_SCSI_CTRLR_MAX_DEVS 8

/* Maximum iovec entries per request (sizes spdk_vhost_blk_task::iovs). */
#define SPDK_VHOST_IOVS_MAX 129

/* NOTE(review): presumably caps requests submitted per virtqueue poll
 * iteration — confirm against the users of this constant. */
#define SPDK_VHOST_VQ_MAX_SUBMISSIONS	32

/*
 * Rate at which stats are checked for interrupt coalescing.
 */
#define SPDK_VHOST_STATS_CHECK_INTERVAL_MS 10
/*
 * Default threshold at which interrupts start to be coalesced.
 */
#define SPDK_VHOST_VQ_IOPS_COALESCING_THRESHOLD 60000

/*
 * Currently coalescing is not used by default.
 * Setting this to value > 0 here or by RPC will enable coalescing.
 */
#define SPDK_VHOST_COALESCING_DELAY_BASE_US 0

/* Virtio / vhost-user feature bits that SPDK vhost offers to the driver. */
#define SPDK_VHOST_FEATURES ((1ULL << VHOST_F_LOG_ALL) | \
	(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
	(1ULL << VIRTIO_F_VERSION_1) | \
	(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \
	(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
	(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
	(1ULL << VIRTIO_F_RING_PACKED) | \
	(1ULL << VIRTIO_F_ANY_LAYOUT))

/* Subset of SPDK_VHOST_FEATURES that is disabled by default. */
#define SPDK_VHOST_DISABLED_FEATURES ((1ULL << VIRTIO_RING_F_EVENT_IDX) | \
	(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY))

/* Packed-ring descriptor flag masks built from the avail/used wrap-marker bit
 * positions. */
#define VRING_DESC_F_AVAIL	(1ULL << VRING_PACKED_DESC_F_AVAIL)
#define VRING_DESC_F_USED	(1ULL << VRING_PACKED_DESC_F_USED)
#define VRING_DESC_F_AVAIL_USED	(VRING_DESC_F_AVAIL | VRING_DESC_F_USED)

/* Shorthand aliases for the rte_vhost inflight/resubmit types. */
typedef struct rte_vhost_resubmit_desc spdk_vhost_resubmit_desc;
typedef struct rte_vhost_resubmit_info spdk_vhost_resubmit_info;
typedef struct rte_vhost_inflight_desc_packed	spdk_vhost_inflight_desc;
64 
65 struct spdk_vhost_virtqueue {
66 	struct rte_vhost_vring vring;
67 	struct rte_vhost_ring_inflight vring_inflight;
68 	uint16_t last_avail_idx;
69 	uint16_t last_used_idx;
70 
71 	struct {
72 		/* To mark a descriptor as available in packed ring
73 		 * Equal to avail_wrap_counter in spec.
74 		 */
75 		uint8_t avail_phase	: 1;
76 		/* To mark a descriptor as used in packed ring
77 		 * Equal to used_wrap_counter in spec.
78 		 */
79 		uint8_t used_phase	: 1;
80 		uint8_t padding		: 5;
81 		bool packed_ring	: 1;
82 	} packed;
83 
84 	void *tasks;
85 
86 	/* Request count from last stats check */
87 	uint32_t req_cnt;
88 
89 	/* Request count from last event */
90 	uint16_t used_req_cnt;
91 
92 	/* How long interrupt is delayed */
93 	uint32_t irq_delay_time;
94 
95 	/* Next time when we need to send event */
96 	uint64_t next_event_time;
97 
98 	/* Associated vhost_virtqueue in the virtio device's virtqueue list */
99 	uint32_t vring_idx;
100 
101 	struct spdk_vhost_session *vsession;
102 
103 	struct spdk_interrupt *intr;
104 } __attribute((aligned(SPDK_CACHE_LINE_SIZE)));
105 
/* Per-connection state of a single vhost-user session. */
struct spdk_vhost_session {
	/* Device this session belongs to. */
	struct spdk_vhost_dev *vdev;

	/* rte_vhost connection ID. */
	int vid;

	/* Unique session ID. */
	uint64_t id;
	/* Unique session name. */
	char *name;

	/* Session lifecycle / polling-mode flags. */
	bool initialized;
	bool started;
	bool needs_restart;
	bool forced_polling;
	bool interrupt_mode;
	bool skip_used_signal;

	/* Guest memory table obtained from rte_vhost. */
	struct rte_vhost_memory *mem;

	/* NOTE(review): appears to count in-flight tasks on this session —
	 * confirm against the users of this field. */
	int task_cnt;

	/* Number of queues in use; presumably bounds iteration over
	 * virtqueue[] — confirm. */
	uint16_t max_queues;

	/* Virtio feature bits negotiated with the driver
	 * (tested via vhost_dev_has_feature()). */
	uint64_t negotiated_features;

	/* Local copy of device coalescing settings. */
	uint32_t coalescing_delay_time_base;
	uint32_t coalescing_io_rate_threshold;

	/* Next time when stats for event coalescing will be checked. */
	uint64_t next_stats_check_time;

	/* Interval used for event coalescing checking. */
	uint64_t stats_check_interval;

	/* Session's stop poller will only try limited times to destroy the session. */
	uint32_t stop_retry_count;

	struct spdk_vhost_virtqueue virtqueue[SPDK_VHOST_MAX_VQUEUES];

	/* Link in spdk_vhost_user_dev::vsessions. */
	TAILQ_ENTRY(spdk_vhost_session) tailq;
};
149 
/* vhost-user transport-specific state layered on top of spdk_vhost_dev. */
struct spdk_vhost_user_dev {
	/* Generic device this transport-specific state extends. */
	struct spdk_vhost_dev *vdev;

	/* Callbacks supplied by the backend (see spdk_vhost_user_dev_backend). */
	const struct spdk_vhost_user_dev_backend *user_backend;

	/* Saved original values used to setup coalescing to avoid integer
	 * rounding issues during save/load config.
	 */
	uint32_t coalescing_delay_us;
	uint32_t coalescing_iops_threshold;

	/* Current connections to the device */
	TAILQ_HEAD(, spdk_vhost_session) vsessions;

	/* Increment-only session counter */
	uint64_t vsessions_num;

	/* Number of started and actively polled sessions */
	uint32_t active_session_num;

	/* Number of pending asynchronous operations */
	uint32_t pending_async_op_num;
};
173 
/* Generic, transport-independent vhost device (controller). */
struct spdk_vhost_dev {
	/* Controller name. */
	char *name;
	/* NOTE(review): presumably the UNIX domain socket path — confirm. */
	char *path;

	/* SPDK thread associated with this device. */
	struct spdk_thread *thread;
	bool registered;

	/* Feature masks passed through to driver registration
	 * (see vhost_register_unix_socket()). */
	uint64_t virtio_features;
	uint64_t disabled_features;
	uint64_t protocol_features;
	bool packed_ring_recovery;

	/* Backend-type operations (blk/scsi). */
	const struct spdk_vhost_dev_backend *backend;

	/* Context passed from transport */
	void *ctxt;

	TAILQ_ENTRY(spdk_vhost_dev) tailq;
};
193 
/**
 * Per-session callback.
 *
 * \param vdev vhost device.
 * \param vsession vhost session.
 * \param arg user-provided parameter.
 *
 * \return negative values will break the foreach call, meaning
 * the function won't be called again. Return codes zero and
 * positive don't have any effect.
 */
typedef int (*spdk_vhost_session_fn)(struct spdk_vhost_dev *vdev,
				     struct spdk_vhost_session *vsession,
				     void *arg);

/**
 * Per-device callback.
 *
 * \param vdev vhost device.
 * \param arg user-provided parameter.
 */
typedef void (*spdk_vhost_dev_fn)(struct spdk_vhost_dev *vdev, void *arg);

/* Callbacks a vhost-user backend provides to the transport layer. */
struct spdk_vhost_user_dev_backend {
	/**
	 * Size of additional per-session context data
	 * allocated whenever a new client connects.
	 */
	size_t session_ctx_size;

	/* Invoked to start processing a session. */
	spdk_vhost_session_fn start_session;
	/* Invoked to stop processing a session. */
	int (*stop_session)(struct spdk_vhost_session *vsession);
};
223 
/* Type of a vhost controller backend. */
enum vhost_backend_type {
	VHOST_BACKEND_BLK = 0,
	VHOST_BACKEND_SCSI,
};

/* Transport-independent device operations implemented per backend type. */
struct spdk_vhost_dev_backend {
	enum vhost_backend_type type;

	/* Read/write the virtio device config space. */
	int (*vhost_get_config)(struct spdk_vhost_dev *vdev, uint8_t *config, uint32_t len);
	int (*vhost_set_config)(struct spdk_vhost_dev *vdev, uint8_t *config,
				uint32_t offset, uint32_t size, uint32_t flags);

	/* Dump runtime info / persisted configuration as JSON. */
	void (*dump_info_json)(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
	void (*write_config_json)(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
	int (*remove_device)(struct spdk_vhost_dev *vdev);
};
240 
/**
 * Translate a guest physical address to a host virtual address
 * using the session's memory table.
 *
 * \param vsession vhost session
 * \param addr guest physical address
 * \param len length of the region to translate
 * \return host virtual address. NOTE(review): presumably NULL when the
 * region is not mapped contiguously — confirm against the implementation.
 */
void *vhost_gpa_to_vva(struct spdk_vhost_session *vsession, uint64_t addr, uint64_t len);

/**
 * Fetch request indexes from the available ring.
 *
 * \param vq virtqueue
 * \param reqs output array for descriptor indexes
 * \param reqs_len capacity of \c reqs
 * \return number of indexes written to \c reqs
 */
uint16_t vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *vq, uint16_t *reqs,
				 uint16_t reqs_len);

/**
 * Get a virtio split descriptor at given index in given virtqueue.
 * The descriptor will provide access to the entire descriptor
 * chain. The subsequent descriptors are accessible via
 * \c spdk_vhost_vring_desc_get_next.
 * \param vsession vhost session
 * \param vq virtqueue
 * \param req_idx descriptor index
 * \param desc pointer to be set to the descriptor
 * \param desc_table descriptor table to be used with
 * \c spdk_vhost_vring_desc_get_next. This might be either
 * default virtqueue descriptor table or per-chain indirect
 * table.
 * \param desc_table_size size of the *desc_table*
 * \return 0 on success, -1 if given index is invalid.
 * If -1 is returned, the content of params is undefined.
 */
int vhost_vq_get_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *vq,
		      uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table,
		      uint32_t *desc_table_size);

/**
 * Get a virtio packed descriptor at given index in given virtqueue.
 * The descriptor will provide access to the entire descriptor
 * chain. The subsequent descriptors are accessible via
 * \c vhost_vring_packed_desc_get_next.
 * \param vsession vhost session
 * \param virtqueue virtqueue
 * \param req_idx descriptor index
 * \param desc pointer to be set to the descriptor
 * \param desc_table descriptor table to be used with
 * \c spdk_vhost_vring_desc_get_next. This might be either
 * \c NULL or per-chain indirect table.
 * \param desc_table_size size of the *desc_table*
 * \return 0 on success, -1 if given index is invalid.
 * If -1 is returned, the content of params is undefined.
 */
int vhost_vq_get_desc_packed(struct spdk_vhost_session *vsession,
			     struct spdk_vhost_virtqueue *virtqueue,
			     uint16_t req_idx, struct vring_packed_desc **desc,
			     struct vring_packed_desc **desc_table, uint32_t *desc_table_size);

/**
 * Get an inflight descriptor at given index from the inflight descriptor
 * array. NOTE(review): output params and the 0/-1 return convention are
 * assumed to mirror \c vhost_vq_get_desc_packed — confirm.
 */
int vhost_inflight_queue_get_desc(struct spdk_vhost_session *vsession,
				  spdk_vhost_inflight_desc *desc_array,
				  uint16_t req_idx, spdk_vhost_inflight_desc **desc,
				  struct vring_packed_desc  **desc_table, uint32_t *desc_table_size);
292 
/**
 * Send IRQ/call client (if pending) for \c vq.
 * \param vsession vhost session
 * \param vq virtqueue
 * \return
 *   0 - if no interrupt was signalled
 *   1 - if interrupt was signalled
 */
int vhost_vq_used_signal(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *vq);


/**
 * Send IRQs for all queues that need to be signaled.
 * \param vsession vhost session
 */
void vhost_session_used_signal(struct spdk_vhost_session *vsession);

/**
 * Send IRQs for the queue that need to be signaled.
 * \param virtqueue virtqueue
 */
void vhost_session_vq_used_signal(struct spdk_vhost_virtqueue *virtqueue);

/**
 * Enqueue an entry onto the split-ring used ring after the device has
 * completed a request.
 * \param vsession vhost session
 * \param vq virtqueue
 * \param id index of the completed descriptor chain
 * \param len number of bytes written (to the guest) by the device
 */
void vhost_vq_used_ring_enqueue(struct spdk_vhost_session *vsession,
				struct spdk_vhost_virtqueue *vq,
				uint16_t id, uint32_t len);

/**
 * Enqueue the entry to the used ring when device complete the request.
 * \param vsession vhost session
 * \param virtqueue virtqueue
 * \param num_descs descriptor count. It's the count of the number of
 * buffers in the chain.
 * \param buffer_id descriptor buffer ID.
 * \param length device write length. Specify the length of the buffer
 * that has been initialized (written to) by the device.
 * \param inflight_head the head idx of this IO inflight desc chain.
 */
void vhost_vq_packed_ring_enqueue(struct spdk_vhost_session *vsession,
				  struct spdk_vhost_virtqueue *virtqueue,
				  uint16_t num_descs, uint16_t buffer_id,
				  uint32_t length, uint16_t inflight_head);
336 
/**
 * Get subsequent descriptor from given split-ring descriptor table.
 * \param desc current descriptor, will be set to the
 * next descriptor (NULL in case this is the last
 * descriptor in the chain or the next desc is invalid)
 * \param desc_table descriptor table
 * \param desc_table_size size of the *desc_table*
 * \return 0 on success, -1 if given index is invalid
 * The *desc* param will be set regardless of the
 * return value.
 */
int vhost_vring_desc_get_next(struct vring_desc **desc,
			      struct vring_desc *desc_table, uint32_t desc_table_size);
/* True if the split-ring descriptor describes a device-writable buffer. */
static inline bool
vhost_vring_desc_is_wr(struct vring_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_WRITE) != 0;
}
355 
/**
 * Append a split-ring descriptor's buffer to an iovec array, translating
 * guest addresses via the session's memory table.
 * \param vsession vhost session
 * \param iov iovec array to append to
 * \param iov_index in/out index of the next free iovec slot
 * \param desc descriptor whose buffer is appended
 * \return NOTE(review): 0/negative convention inferred from the int
 * return and sibling helpers — confirm.
 */
int vhost_vring_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
			    uint16_t *iov_index, const struct vring_desc *desc);

/* Check whether the packed ring has a descriptor available for processing. */
bool vhost_vq_packed_ring_is_avail(struct spdk_vhost_virtqueue *virtqueue);

/**
 * Get subsequent descriptor from vq or desc table.
 * \param desc current descriptor, will be set to the
 * next descriptor (NULL in case this is the last
 * descriptor in the chain or the next desc is invalid)
 * \param req_idx index of current desc, will be set to the next
 * index. If desc_table != NULL the req_idx is the vring index
 * or the req_idx is the desc_table index.
 * \param vq virtqueue
 * \param desc_table descriptor table
 * \param desc_table_size size of the *desc_table*
 * \return 0 on success, -1 if given index is invalid
 * The *desc* param will be set regardless of the
 * return value.
 */
int vhost_vring_packed_desc_get_next(struct vring_packed_desc **desc, uint16_t *req_idx,
				     struct spdk_vhost_virtqueue *vq,
				     struct vring_packed_desc *desc_table,
				     uint32_t desc_table_size);

/* Packed-ring counterparts of the split-ring helpers above. */
bool vhost_vring_packed_desc_is_wr(struct vring_packed_desc *cur_desc);

int vhost_vring_packed_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
				   uint16_t *iov_index, const struct vring_packed_desc *desc);

/* Inflight-descriptor counterparts of the helpers above. */
bool vhost_vring_inflight_desc_is_wr(spdk_vhost_inflight_desc *cur_desc);

int vhost_vring_inflight_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
				     uint16_t *iov_index, const spdk_vhost_inflight_desc *desc);

/* Return the buffer ID of the packed descriptor chain starting at req_idx;
 * the number of descriptors in the chain is returned via *num_descs. */
uint16_t vhost_vring_packed_desc_get_buffer_id(struct spdk_vhost_virtqueue *vq, uint16_t req_idx,
		uint16_t *num_descs);
392 
393 static inline bool
394 __attribute__((always_inline))
395 vhost_dev_has_feature(struct spdk_vhost_session *vsession, unsigned feature_id)
396 {
397 	return vsession->negotiated_features & (1ULL << feature_id);
398 }
399 
/* Register a vhost controller with the given generic and vhost-user
 * backends. NOTE(review): mask_str is presumably a CPU core mask string —
 * confirm against the implementation. */
int vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
		       const struct spdk_json_val *params,
		       const struct spdk_vhost_dev_backend *backend,
		       const struct spdk_vhost_user_dev_backend *user_backend);
int vhost_dev_unregister(struct spdk_vhost_dev *vdev);

/* Write generic device info into the provided JSON write context. */
void vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);

/*
 * Set vhost session to run in interrupt or poll mode
 */
void vhost_user_session_set_interrupt_mode(struct spdk_vhost_session *vsession,
		bool interrupt_mode);

/*
 * Memory registration functions used in start/stop device callbacks
 */
void vhost_session_mem_register(struct rte_vhost_memory *mem);
void vhost_session_mem_unregister(struct rte_vhost_memory *mem);

/*
 * Call a function for each session of the provided vhost device.
 * The function will be called one-by-one on each session's thread.
 *
 * \param dev vhost device
 * \param fn function to call on each session's thread
 * \param cpl_fn function to be called at the end of the iteration on
 * the vhost management thread.
 * Optional, can be NULL.
 * \param arg additional argument to the both callbacks
 */
void vhost_user_dev_foreach_session(struct spdk_vhost_dev *dev,
				    spdk_vhost_session_fn fn,
				    spdk_vhost_dev_fn cpl_fn,
				    void *arg);

/**
 * Call a function on the provided lcore and block until either
 * vhost_user_session_start_done() or vhost_user_session_stop_done()
 * is called.
 *
 * This must be called under the global vhost mutex, which this function
 * will unlock for the time it's waiting. It's meant to be called only
 * from start/stop session callbacks.
 *
 * \param vsession vhost session
 * \param cb_fn the function to call. The void *arg parameter in cb_fn
 * is always NULL.
 * \param timeout_sec timeout in seconds. This function will still
 * block after the timeout expires, but will print the provided errmsg.
 * \param errmsg error message to print once the timeout expires
 * \return return the code passed to spdk_vhost_session_event_done().
 */
int vhost_user_session_send_event(struct spdk_vhost_session *vsession,
				  spdk_vhost_session_fn cb_fn, unsigned timeout_sec,
				  const char *errmsg);
456 
/**
 * Finish a blocking spdk_vhost_user_session_send_event() call and finally
 * start the session. This must be called on the target lcore, which
 * will now receive all session-related messages (e.g. from
 * vhost_user_dev_foreach_session()).
 *
 * Must be called under the global vhost lock.
 *
 * \param vsession vhost session
 * \param response return code
 */
void vhost_user_session_start_done(struct spdk_vhost_session *vsession, int response);

/**
 * Finish a blocking spdk_vhost_user_session_send_event() call and finally
 * stop the session. This must be called on the session's lcore which
 * used to receive all session-related messages (e.g. from
 * vhost_user_dev_foreach_session()). After this call, the session-
 * related messages will be once again processed by any arbitrary thread.
 *
 * Must be called under the global vhost lock.
 *
 * \param vsession vhost session
 * \param response return code
 */
void vhost_user_session_stop_done(struct spdk_vhost_session *vsession, int response);

/* Look up an active session by its rte_vhost connection ID. */
struct spdk_vhost_session *vhost_session_find_by_vid(int vid);
void vhost_session_install_rte_compat_hooks(struct spdk_vhost_session *vsession);
/* Register the controller's vhost-user UNIX domain socket with rte_vhost,
 * advertising the given feature masks. */
int vhost_register_unix_socket(const char *path, const char *ctrl_name,
			       uint64_t virtio_features, uint64_t disabled_features, uint64_t protocol_features);
int vhost_driver_unregister(const char *path);
int vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);
int vhost_get_negotiated_features(int vid, uint64_t *negotiated_features);

int remove_vhost_controller(struct spdk_vhost_dev *vdev);

/* Acquire/release an I/O channel for a vhost-blk device. */
struct spdk_io_channel *vhost_blk_get_io_channel(struct spdk_vhost_dev *vdev);
void vhost_blk_put_io_channel(struct spdk_io_channel *ch);

/* The spdk_bdev pointer should only be used to retrieve
 * the device properties, ex. number of blocks or I/O type supported. */
struct spdk_bdev *vhost_blk_get_bdev(struct spdk_vhost_dev *vdev);

/* Function calls from vhost.c to rte_vhost_user.c,
 * shall be removed once virtio transport abstraction is complete. */
int vhost_user_session_set_coalescing(struct spdk_vhost_dev *dev,
				      struct spdk_vhost_session *vsession, void *ctx);
int vhost_user_dev_set_coalescing(struct spdk_vhost_user_dev *user_dev, uint32_t delay_base_us,
				  uint32_t iops_threshold);
int vhost_user_dev_register(struct spdk_vhost_dev *vdev, const char *name,
			    struct spdk_cpuset *cpumask, const struct spdk_vhost_user_dev_backend *user_backend);
int vhost_user_dev_unregister(struct spdk_vhost_dev *vdev);
int vhost_user_init(void);
void vhost_user_fini(spdk_vhost_fini_cb vhost_cb);
514 
/* Construct / destroy a virtio-blk controller. */
int virtio_blk_construct_ctrlr(struct spdk_vhost_dev *vdev, const char *address,
			       struct spdk_cpuset *cpumask, const struct spdk_json_val *params,
			       const struct spdk_vhost_user_dev_backend *user_backend);
int virtio_blk_destroy_ctrlr(struct spdk_vhost_dev *vdev);

struct spdk_vhost_blk_task;

/* Completion callback invoked when a virtio-blk request finishes;
 * \c status is the status byte reported for the request. */
typedef void (*virtio_blk_request_cb)(uint8_t status, struct spdk_vhost_blk_task *task,
				      void *cb_arg);

/* State of a single in-flight virtio-blk request. */
struct spdk_vhost_blk_task {
	/* bdev I/O backing this request. */
	struct spdk_bdev_io *bdev_io;
	/* Completion callback and its argument. */
	virtio_blk_request_cb cb;
	void *cb_arg;

	/* Location where the request's status byte is written.
	 * NOTE(review): presumably points into guest-visible memory — confirm. */
	volatile uint8_t *status;

	/* for io wait */
	struct spdk_bdev_io_wait_entry bdev_io_wait;
	struct spdk_io_channel *bdev_io_wait_ch;
	struct spdk_vhost_dev *bdev_io_wait_vdev;

	/** Number of bytes that were written. */
	uint32_t used_len;
	/* Number of valid entries in iovs[]. */
	uint16_t iovcnt;
	struct iovec iovs[SPDK_VHOST_IOVS_MAX];

	/** Size of whole payload in bytes */
	uint32_t payload_size;
};

/* Submit the virtio-blk request described by \c task; \c cb is invoked
 * with the request's status on completion. */
int virtio_blk_process_request(struct spdk_vhost_dev *vdev, struct spdk_io_channel *ch,
			       struct spdk_vhost_blk_task *task, virtio_blk_request_cb cb, void *cb_arg);

/* Callback completing an asynchronous bdev-event notification. */
typedef void (*bdev_event_cb_complete)(struct spdk_vhost_dev *vdev, void *ctx);

/* Maximum length of a virtio-blk transport name string. */
#define SPDK_VIRTIO_BLK_TRSTRING_MAX_LEN 32
552 
/* Operation table implemented by each virtio-blk transport. */
struct spdk_virtio_blk_transport_ops {
	/**
	 * Transport name
	 */
	char name[SPDK_VIRTIO_BLK_TRSTRING_MAX_LEN];

	/**
	 * Create a transport for the given transport opts
	 */
	struct spdk_virtio_blk_transport *(*create)(const struct spdk_json_val *params);

	/**
	 * Dump transport-specific opts into JSON
	 */
	void (*dump_opts)(struct spdk_virtio_blk_transport *transport, struct spdk_json_write_ctx *w);

	/**
	 * Destroy the transport
	 */
	int (*destroy)(struct spdk_virtio_blk_transport *transport,
		       spdk_vhost_fini_cb cb_fn);

	/**
	 * Create vhost block controller
	 */
	int (*create_ctrlr)(struct spdk_vhost_dev *vdev, struct spdk_cpuset *cpumask,
			    const char *address, const struct spdk_json_val *params,
			    void *custom_opts);

	/**
	 * Destroy vhost block controller
	 */
	int (*destroy_ctrlr)(struct spdk_vhost_dev *vdev);

	/*
	 * Signal removal of the bdev.
	 */
	void (*bdev_event)(enum spdk_bdev_event_type type, struct spdk_vhost_dev *vdev,
			   bdev_event_cb_complete cb, void *cb_arg);
};
593 
/* A created virtio-blk transport instance. */
struct spdk_virtio_blk_transport {
	const struct spdk_virtio_blk_transport_ops	*ops;
	TAILQ_ENTRY(spdk_virtio_blk_transport)		tailq;
};

/* List node holding a copy of a registered transport ops table. */
struct virtio_blk_transport_ops_list_element {
	struct spdk_virtio_blk_transport_ops			ops;
	TAILQ_ENTRY(virtio_blk_transport_ops_list_element)	link;
};

/* Register a transport ops table; typically invoked via
 * SPDK_VIRTIO_BLK_TRANSPORT_REGISTER at program startup. */
void virtio_blk_transport_register(const struct spdk_virtio_blk_transport_ops *ops);
int virtio_blk_transport_create(const char *transport_name, const struct spdk_json_val *params);
int virtio_blk_transport_destroy(struct spdk_virtio_blk_transport *transport,
				 spdk_vhost_fini_cb cb_fn);

/* Look up a registered transport ops table by name.
 * NOTE(review): presumably returns NULL when not found — confirm. */
const struct spdk_virtio_blk_transport_ops *virtio_blk_get_transport_ops(
	const char *transport_name);


/*
 * Macro used to register new transports.
 * Expands to a GCC constructor that runs before main().
 */
#define SPDK_VIRTIO_BLK_TRANSPORT_REGISTER(name, transport_ops) \
static void __attribute__((constructor)) _virtio_blk_transport_register_##name(void) \
{ \
	virtio_blk_transport_register(transport_ops); \
}
621 
622 #endif /* SPDK_VHOST_INTERNAL_H */
623