/* SPDK: module/bdev/virtio/bdev_virtio_blk.c (revision 88e3ffd7b6c5ec1ea1a660354d25f02c766092e1) */
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/endian.h"
#include "spdk/env.h"
#include "spdk/thread.h"
#include "spdk/string.h"
#include "spdk/util.h"
#include "spdk/json.h"

#include "spdk_internal/assert.h"
#include "spdk/bdev_module.h"
#include "spdk/log.h"
#include "spdk_internal/virtio.h"
#include "spdk_internal/vhost_user.h"

#include <linux/virtio_blk.h>
#include <linux/virtio_ids.h>

#include "bdev_virtio.h"

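/*
 * Per-device context. The virtio transport state (vdev) and the SPDK bdev
 * exposed on top of it live in a single allocation; bdev->ctxt points back
 * at this structure. The readonly/unmap flags mirror the VIRTIO_BLK_F_RO and
 * VIRTIO_BLK_F_DISCARD features negotiated with the device.
 */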
struct virtio_blk_dev {
	struct virtio_dev		vdev;
	struct spdk_bdev		bdev;
	bool				readonly;
	bool				unmap;
};

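/*
 * Per-I/O context carved out of spdk_bdev_io->driver_ctx. It holds the fixed
 * parts of a virtio-blk request: the request header, an optional
 * discard/write-zeroes descriptor, and the one-byte status written back by
 * the device, plus the iovecs used to put them on the virtqueue.
 */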
struct virtio_blk_io_ctx {
	struct iovec				iov_req;
	struct iovec				iov_resp;
	struct iovec				iov_unmap;
	struct virtio_blk_outhdr		req;
	struct virtio_blk_discard_write_zeroes	unmap;
	uint8_t					resp;
};

struct bdev_virtio_blk_io_channel {
	struct virtio_dev		*vdev;

	/** Virtqueue exclusively assigned to this channel. */
	struct virtqueue		*vq;

	/** Virtio response poller. */
	struct spdk_poller		*poller;
};

/* Features desired/implemented by this driver. */
#define VIRTIO_BLK_DEV_SUPPORTED_FEATURES		\
	(1ULL << VIRTIO_BLK_F_BLK_SIZE		|	\
	 1ULL << VIRTIO_BLK_F_TOPOLOGY		|	\
	 1ULL << VIRTIO_BLK_F_MQ		|	\
	 1ULL << VIRTIO_BLK_F_RO		|	\
	 1ULL << VIRTIO_BLK_F_DISCARD		|	\
	 1ULL << VIRTIO_RING_F_EVENT_IDX	|	\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

static int bdev_virtio_initialize(void);
static int bdev_virtio_blk_get_ctx_size(void);

static struct spdk_bdev_module virtio_blk_if = {
	.name = "virtio_blk",
	.module_init = bdev_virtio_initialize,
	.get_ctx_size = bdev_virtio_blk_get_ctx_size,
};

SPDK_BDEV_MODULE_REGISTER(virtio_blk, &virtio_blk_if)

static int bdev_virtio_blk_ch_create_cb(void *io_device, void *ctx_buf);
static void bdev_virtio_blk_ch_destroy_cb(void *io_device, void *ctx_buf);

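/*
 * Prepare the per-I/O context: point the request/status/unmap iovecs at the
 * fields embedded in the context and zero out the request header. The caller
 * fills in the request type and sector afterwards.
 */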
static struct virtio_blk_io_ctx *
bdev_virtio_blk_init_io_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_blk_outhdr *req;
	uint8_t *resp;
	struct virtio_blk_discard_write_zeroes *desc;

	struct virtio_blk_io_ctx *io_ctx = (struct virtio_blk_io_ctx *)bdev_io->driver_ctx;

	req = &io_ctx->req;
	resp = &io_ctx->resp;
	desc = &io_ctx->unmap;

	io_ctx->iov_req.iov_base = req;
	io_ctx->iov_req.iov_len = sizeof(*req);

	io_ctx->iov_resp.iov_base = resp;
	io_ctx->iov_resp.iov_len = sizeof(*resp);

	io_ctx->iov_unmap.iov_base = desc;
	io_ctx->iov_unmap.iov_len = sizeof(*desc);

	memset(req, 0, sizeof(*req));
	return io_ctx;
}

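/*
 * Enqueue one request as a single descriptor chain:
 *   [request header (RO)] [data iovecs or discard descriptor] [status byte (WR)]
 * hence the iovcnt + 2 descriptors reserved up front. If the virtqueue is
 * full, the I/O is completed with NOMEM so the bdev layer can retry it later.
 */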
static void
bdev_virtio_blk_send_io(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_virtio_blk_io_channel *virtio_channel = spdk_io_channel_get_ctx(ch);
	struct virtqueue *vq = virtio_channel->vq;
	struct virtio_blk_io_ctx *io_ctx = (struct virtio_blk_io_ctx *)bdev_io->driver_ctx;
	int rc;

	rc = virtqueue_req_start(vq, bdev_io, bdev_io->u.bdev.iovcnt + 2);
	if (rc == -ENOMEM) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
		return;
	} else if (rc != 0) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP) {
		virtqueue_req_add_iovs(vq, &io_ctx->iov_unmap, 1, SPDK_VIRTIO_DESC_RO);
	} else {
		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				       bdev_io->type == SPDK_BDEV_IO_TYPE_READ ?
				       SPDK_VIRTIO_DESC_WR : SPDK_VIRTIO_DESC_RO);
	}
	virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);

	virtqueue_req_flush(vq);
}

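/*
 * Translate the bdev I/O into a virtio-blk request. Note that virtio-blk
 * expresses sector and num_sectors in 512-byte units regardless of the
 * device's logical block size, hence the conversions below.
 */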
static void
bdev_virtio_command(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_blk_io_ctx *io_ctx = bdev_virtio_blk_init_io_vreq(ch, bdev_io);
	struct virtio_blk_outhdr *req = &io_ctx->req;
	struct virtio_blk_discard_write_zeroes *desc = &io_ctx->unmap;

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		req->type = VIRTIO_BLK_T_IN;
	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		req->type = VIRTIO_BLK_T_OUT;
	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP) {
		req->type = VIRTIO_BLK_T_DISCARD;
		desc->sector = bdev_io->u.bdev.offset_blocks *
			       spdk_bdev_get_block_size(bdev_io->bdev) / 512;
		desc->num_sectors = bdev_io->u.bdev.num_blocks *
				    spdk_bdev_get_block_size(bdev_io->bdev) / 512;
		desc->flags = 0;
	}

	req->sector = bdev_io->u.bdev.offset_blocks *
		      spdk_bdev_get_block_size(bdev_io->bdev) / 512;

	bdev_virtio_blk_send_io(ch, bdev_io);
}

static void
bdev_virtio_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		       bool success)
{
	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	bdev_virtio_command(ch, bdev_io);
}

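/*
 * Dispatch a bdev I/O. Reads first go through spdk_bdev_io_get_buf() to make
 * sure a data buffer is available; writes are rejected on read-only devices;
 * resets are completed immediately with success; unmap is forwarded only when
 * VIRTIO_BLK_F_DISCARD was negotiated. Any other type (flush falls through to
 * the default case) is reported back as a failure.
 */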
static int
_bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_blk_dev *bvdev = bdev_io->bdev->ctxt;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		return 0;
	case SPDK_BDEV_IO_TYPE_WRITE:
		if (bvdev->readonly) {
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		} else {
			bdev_virtio_command(ch, bdev_io);
		}
		return 0;
	case SPDK_BDEV_IO_TYPE_RESET:
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
		return 0;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		if (bvdev->unmap) {
			bdev_virtio_command(ch, bdev_io);
		} else {
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
		return 0;
	case SPDK_BDEV_IO_TYPE_FLUSH:
	default:
		return -1;
	}

	SPDK_UNREACHABLE();
}

static void
bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	if (_bdev_virtio_submit_request(ch, bdev_io) < 0) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static bool
bdev_virtio_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct virtio_blk_dev *bvdev = ctx;

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_RESET:
		return true;
	case SPDK_BDEV_IO_TYPE_WRITE:
		return !bvdev->readonly;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		return bvdev->unmap;
	case SPDK_BDEV_IO_TYPE_FLUSH:
	default:
		return false;
	}
}

static struct spdk_io_channel *
bdev_virtio_get_io_channel(void *ctx)
{
	struct virtio_blk_dev *bvdev = ctx;

	return spdk_get_io_channel(bvdev);
}

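/*
 * Teardown is asynchronous: bdev_virtio_disk_destruct() returns 1 and the
 * actual cleanup happens in virtio_blk_dev_unregister_cb() once all I/O
 * channels have been released, at which point spdk_bdev_destruct_done()
 * signals the bdev layer.
 */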
static void
virtio_blk_dev_unregister_cb(void *io_device)
{
	struct virtio_blk_dev *bvdev = io_device;
	struct virtio_dev *vdev = &bvdev->vdev;

	virtio_dev_stop(vdev);
	virtio_dev_destruct(vdev);
	spdk_bdev_destruct_done(&bvdev->bdev, 0);
	free(bvdev);
}

static int
bdev_virtio_disk_destruct(void *ctx)
{
	struct virtio_blk_dev *bvdev = ctx;

	spdk_io_device_unregister(bvdev, virtio_blk_dev_unregister_cb);
	return 1;
}

int
bdev_virtio_blk_dev_remove(const char *name, bdev_virtio_remove_cb cb_fn, void *cb_arg)
{
	struct spdk_bdev *bdev;

	bdev = spdk_bdev_get_by_name(name);
	if (bdev == NULL) {
		return -ENODEV;
	}

	if (bdev->module != &virtio_blk_if) {
		return -ENODEV;
	}

	spdk_bdev_unregister(bdev, cb_fn, cb_arg);

	return 0;
}

static int
bdev_virtio_dump_json_config(void *ctx, struct spdk_json_write_ctx *w)
{
	struct virtio_blk_dev *bvdev = ctx;

	virtio_dev_dump_json_info(&bvdev->vdev, w);
	return 0;
}

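/*
 * Emit the RPC call that would recreate this bdev on configuration save.
 * A minimal sketch of the generated entry is shown below; the transport
 * specific keys inside "params" come from the backend's write_json_config()
 * callback and the values here are illustrative only:
 *
 *   {
 *     "method": "bdev_virtio_attach_controller",
 *     "params": { "name": "VirtioBlk0", "dev_type": "blk", ... }
 *   }
 */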
static void
bdev_virtio_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	struct virtio_blk_dev *bvdev = bdev->ctxt;

	spdk_json_write_object_begin(w);

	spdk_json_write_named_string(w, "method", "bdev_virtio_attach_controller");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "name", bvdev->vdev.name);
	spdk_json_write_named_string(w, "dev_type", "blk");

	/* Write transport specific parameters. */
	bvdev->vdev.backend_ops->write_json_config(&bvdev->vdev, w);

	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);
}

static const struct spdk_bdev_fn_table virtio_fn_table = {
	.destruct		= bdev_virtio_disk_destruct,
	.submit_request		= bdev_virtio_submit_request,
	.io_type_supported	= bdev_virtio_io_type_supported,
	.get_io_channel		= bdev_virtio_get_io_channel,
	.dump_info_json		= bdev_virtio_dump_json_config,
	.write_config_json	= bdev_virtio_write_config_json,
};

static void
bdev_virtio_io_cpl(struct spdk_bdev_io *bdev_io)
{
	struct virtio_blk_io_ctx *io_ctx = (struct virtio_blk_io_ctx *)bdev_io->driver_ctx;

	spdk_bdev_io_complete(bdev_io, io_ctx->resp == VIRTIO_BLK_S_OK ?
			      SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED);
}

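/*
 * Channel poller: drain up to 32 completed requests per invocation. The
 * cookie handed to virtqueue_req_start() was the spdk_bdev_io itself, so each
 * returned pointer can be completed directly based on the status byte the
 * device wrote back.
 */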
static int
bdev_virtio_poll(void *arg)
{
	struct bdev_virtio_blk_io_channel *ch = arg;
	void *io[32];
	uint32_t io_len[32];
	uint16_t i, cnt;

	cnt = virtio_recv_pkts(ch->vq, io, io_len, SPDK_COUNTOF(io));
	for (i = 0; i < cnt; ++i) {
		bdev_virtio_io_cpl(io[i]);
	}

	return cnt;
}

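/*
 * Each I/O channel claims one virtqueue for its exclusive use and polls it on
 * the calling thread. Channel creation fails once every queue requested at
 * virtio_dev_start() time has been claimed, so there can be at most as many
 * channels as request queues.
 */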
static int
bdev_virtio_blk_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct virtio_blk_dev *bvdev = io_device;
	struct virtio_dev *vdev = &bvdev->vdev;
	struct bdev_virtio_blk_io_channel *ch = ctx_buf;
	struct virtqueue *vq;
	int32_t queue_idx;

	queue_idx = virtio_dev_find_and_acquire_queue(vdev, 0);
	if (queue_idx < 0) {
		SPDK_ERRLOG("Couldn't get an unused queue for the io_channel.\n");
		return -1;
	}

	vq = vdev->vqs[queue_idx];

	ch->vdev = vdev;
	ch->vq = vq;

	ch->poller = SPDK_POLLER_REGISTER(bdev_virtio_poll, ch, 0);
	return 0;
}

static void
bdev_virtio_blk_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct virtio_blk_dev *bvdev = io_device;
	struct virtio_dev *vdev = &bvdev->vdev;
	struct bdev_virtio_blk_io_channel *ch = ctx_buf;
	struct virtqueue *vq = ch->vq;

	spdk_poller_unregister(&ch->poller);
	virtio_dev_release_queue(vdev, vq->vq_queue_index);
}

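/*
 * Common initialization after feature negotiation: read the device config
 * (block size, capacity, queue count, optional size_max/seg_max limits),
 * derive the bdev geometry, start the virtio device with the requested number
 * of request queues, then register the io_device and the bdev itself.
 */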
static int
virtio_blk_dev_init(struct virtio_blk_dev *bvdev, uint16_t max_queues)
{
	struct virtio_dev *vdev = &bvdev->vdev;
	struct spdk_bdev *bdev = &bvdev->bdev;
	uint64_t capacity, num_blocks;
	uint32_t block_size, size_max, seg_max;
	uint16_t host_max_queues;
	int rc;

	if (virtio_dev_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE)) {
		rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_blk_config, blk_size),
						&block_size, sizeof(block_size));
		if (rc) {
			SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc));
			return rc;
		}

		if (block_size == 0 || block_size % 512 != 0) {
			SPDK_ERRLOG("%s: invalid block size (%"PRIu32"). Must be "
				    "a multiple of 512.\n", vdev->name, block_size);
			return -EIO;
		}
	} else {
		block_size = 512;
	}

	rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_blk_config, capacity),
					&capacity, sizeof(capacity));
	if (rc) {
		SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc));
		return rc;
	}

	/* `capacity` is a number of 512-byte sectors. */
	num_blocks = capacity * 512 / block_size;
	if (num_blocks == 0) {
		SPDK_ERRLOG("%s: size too small (size: %"PRIu64", blocksize: %"PRIu32").\n",
			    vdev->name, capacity * 512, block_size);
		return -EIO;
	}

	if ((capacity * 512) % block_size != 0) {
		SPDK_WARNLOG("%s: size has been rounded down to the nearest block size boundary. "
			     "(block size: %"PRIu32", previous size: %"PRIu64", new size: %"PRIu64")\n",
			     vdev->name, block_size, capacity * 512, num_blocks * block_size);
	}

	if (virtio_dev_has_feature(vdev, VIRTIO_BLK_F_MQ)) {
		rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_blk_config, num_queues),
						&host_max_queues, sizeof(host_max_queues));
		if (rc) {
			SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc));
			return rc;
		}
	} else {
		host_max_queues = 1;
	}

	if (virtio_dev_has_feature(vdev, VIRTIO_BLK_F_SIZE_MAX)) {
		rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_blk_config, size_max),
						&size_max, sizeof(size_max));
		if (rc) {
			SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc));
			return rc;
		}

		if (spdk_unlikely(size_max < block_size)) {
			SPDK_WARNLOG("%s: reported size_max is smaller than the block size; "
				     "raising the maximum segment size to %u.\n",
				     vdev->name, block_size);
			size_max = block_size;
		}

		bdev->max_segment_size = size_max;
	}

	if (virtio_dev_has_feature(vdev, VIRTIO_BLK_F_SEG_MAX)) {
		rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_blk_config, seg_max),
						&seg_max, sizeof(seg_max));
		if (rc) {
			SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc));
			return rc;
		}

		if (spdk_unlikely(seg_max == 0)) {
			SPDK_ERRLOG("%s: virtio blk SEG_MAX can't be 0\n", vdev->name);
			return -EINVAL;
		}

		bdev->max_num_segments = seg_max;
	}

	if (virtio_dev_has_feature(vdev, VIRTIO_BLK_F_RO)) {
		bvdev->readonly = true;
	}

	if (virtio_dev_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
		bvdev->unmap = true;
	}

	if (max_queues == 0) {
		SPDK_ERRLOG("%s: requested 0 request queues (%"PRIu16" available).\n",
			    vdev->name, host_max_queues);
		return -EINVAL;
	}

	if (max_queues > host_max_queues) {
		SPDK_WARNLOG("%s: requested %"PRIu16" request queues "
			     "but only %"PRIu16" available.\n",
			     vdev->name, max_queues, host_max_queues);
		max_queues = host_max_queues;
	}

	/* bdev is tied with the virtio device; we can reuse the name */
	bdev->name = vdev->name;
	rc = virtio_dev_start(vdev, max_queues, 0);
	if (rc != 0) {
		return rc;
	}

	bdev->product_name = "VirtioBlk Disk";
	bdev->write_cache = 0;
	bdev->blocklen = block_size;
	bdev->blockcnt = num_blocks;

	bdev->ctxt = bvdev;
	bdev->fn_table = &virtio_fn_table;
	bdev->module = &virtio_blk_if;

	spdk_io_device_register(bvdev, bdev_virtio_blk_ch_create_cb,
				bdev_virtio_blk_ch_destroy_cb,
				sizeof(struct bdev_virtio_blk_io_channel),
				vdev->name);

	rc = spdk_bdev_register(bdev);
	if (rc) {
		SPDK_ERRLOG("Failed to register bdev name=%s\n", bdev->name);
		spdk_io_device_unregister(bvdev, NULL);
		virtio_dev_stop(vdev);
		return rc;
	}

	return 0;
}

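/*
 * PCI attach path, invoked via bdev_virtio_pci_blk_dev_create_cb() for a
 * matching virtio-blk PCI device. It resets the device and negotiates the
 * supported feature set, reads the queue count when VIRTIO_BLK_F_MQ is
 * offered, then falls through to the common virtio_blk_dev_init(). Devices
 * without an explicit name get a generated "VirtioBlk<N>" name.
 */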
static struct virtio_blk_dev *
virtio_pci_blk_dev_create(const char *name, struct virtio_pci_ctx *pci_ctx)
{
	static int pci_dev_counter = 0;
	struct virtio_blk_dev *bvdev;
	struct virtio_dev *vdev;
	char *default_name = NULL;
	uint16_t num_queues;
	int rc;

	bvdev = calloc(1, sizeof(*bvdev));
	if (bvdev == NULL) {
		SPDK_ERRLOG("virtio device calloc failed\n");
		return NULL;
	}
	vdev = &bvdev->vdev;

	if (name == NULL) {
		default_name = spdk_sprintf_alloc("VirtioBlk%"PRIu32, pci_dev_counter++);
		if (default_name == NULL) {
			free(bvdev);
			return NULL;
		}
		name = default_name;
	}

	rc = virtio_pci_dev_init(vdev, name, pci_ctx);
	free(default_name);

	if (rc != 0) {
		free(bvdev);
		return NULL;
	}

	rc = virtio_dev_reset(vdev, VIRTIO_BLK_DEV_SUPPORTED_FEATURES);
	if (rc != 0) {
		goto fail;
	}

	/* TODO: add a way to limit usable virtqueues */
	if (virtio_dev_has_feature(vdev, VIRTIO_BLK_F_MQ)) {
		rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_blk_config, num_queues),
						&num_queues, sizeof(num_queues));
		if (rc) {
			SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc));
			goto fail;
		}
	} else {
		num_queues = 1;
	}

	rc = virtio_blk_dev_init(bvdev, num_queues);
	if (rc != 0) {
		goto fail;
	}

	return bvdev;

fail:
	vdev->ctx = NULL;
	virtio_dev_destruct(vdev);
	free(bvdev);
	return NULL;
}

static struct virtio_blk_dev *
virtio_user_blk_dev_create(const char *name, const char *path,
			   uint16_t num_queues, uint32_t queue_size)
{
	struct virtio_blk_dev *bvdev;
	int rc;

	bvdev = calloc(1, sizeof(*bvdev));
	if (bvdev == NULL) {
		SPDK_ERRLOG("calloc failed for virtio device %s: %s\n", name, path);
		return NULL;
	}

	rc = virtio_user_dev_init(&bvdev->vdev, name, path, queue_size);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create virtio device %s: %s\n", name, path);
		free(bvdev);
		return NULL;
	}

	rc = virtio_dev_reset(&bvdev->vdev, VIRTIO_BLK_DEV_SUPPORTED_FEATURES);
	if (rc != 0) {
		virtio_dev_destruct(&bvdev->vdev);
		free(bvdev);
		return NULL;
	}

	rc = virtio_blk_dev_init(bvdev, num_queues);
	if (rc != 0) {
		virtio_dev_destruct(&bvdev->vdev);
		free(bvdev);
		return NULL;
	}

	return bvdev;
}

struct bdev_virtio_pci_dev_create_ctx {
	const char *name;
	struct virtio_blk_dev *ret;
};

static int
bdev_virtio_pci_blk_dev_create_cb(struct virtio_pci_ctx *pci_ctx, void *ctx)
{
	struct bdev_virtio_pci_dev_create_ctx *create_ctx = ctx;

	create_ctx->ret = virtio_pci_blk_dev_create(create_ctx->name, pci_ctx);
	if (create_ctx->ret == NULL) {
		return -1;
	}

	return 0;
}

struct spdk_bdev *
bdev_virtio_pci_blk_dev_create(const char *name, struct spdk_pci_addr *pci_addr)
{
	struct bdev_virtio_pci_dev_create_ctx create_ctx;

	create_ctx.name = name;
	create_ctx.ret = NULL;

	virtio_pci_dev_attach(bdev_virtio_pci_blk_dev_create_cb, &create_ctx,
			      VIRTIO_ID_BLOCK, pci_addr);

	if (create_ctx.ret == NULL) {
		return NULL;
	}

	return &create_ctx.ret->bdev;
}

static int
bdev_virtio_initialize(void)
{
	return 0;
}

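/*
 * vhost-user attach path, typically invoked from the
 * bdev_virtio_attach_controller RPC: "path" is the vhost-user socket, while
 * num_queues and queue_size bound the virtqueues requested from the target.
 */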
struct spdk_bdev *
bdev_virtio_user_blk_dev_create(const char *name, const char *path,
				unsigned num_queues, unsigned queue_size)
{
	struct virtio_blk_dev *bvdev;

	bvdev = virtio_user_blk_dev_create(name, path, num_queues, queue_size);
	if (bvdev == NULL) {
		return NULL;
	}

	return &bvdev->bdev;
}

static int
bdev_virtio_blk_get_ctx_size(void)
{
	return sizeof(struct virtio_blk_io_ctx);
}

SPDK_LOG_REGISTER_COMPONENT(virtio_blk)