1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/endian.h"
#include "spdk/env.h"
#include "spdk/thread.h"
#include "spdk/string.h"
#include "spdk/util.h"
#include "spdk/json.h"

#include "spdk_internal/assert.h"
#include "spdk/bdev_module.h"
#include "spdk/log.h"
#include "spdk_internal/virtio.h"
#include "spdk_internal/vhost_user.h"

#include <linux/virtio_blk.h>
#include <linux/virtio_ids.h>

#include "bdev_virtio.h"

/* One virtio-blk device exposed as a single SPDK bdev.
 * The embedded virtio_dev MUST stay the first member - creation/cleanup
 * paths treat &bvdev->vdev and bvdev interchangeably.
 */
struct virtio_blk_dev {
	struct virtio_dev vdev;
	struct spdk_bdev bdev;
	/* Set from VIRTIO_BLK_F_RO; writes are failed locally when true. */
	bool readonly;
	/* Set from VIRTIO_BLK_F_DISCARD; UNMAP is advertised only when true. */
	bool unmap;
};

/* Per-I/O context carried in spdk_bdev_io->driver_ctx.
 * Holds the virtio-blk request header, the device-writable status byte,
 * and the discard descriptor, plus the iovecs pointing at each of them.
 */
struct virtio_blk_io_ctx {
	struct iovec iov_req;
	struct iovec iov_resp;
	struct iovec iov_unmap;
	struct virtio_blk_outhdr req;
	struct virtio_blk_discard_write_zeroes unmap;
	uint8_t resp;
};

struct bdev_virtio_blk_io_channel {
	struct virtio_dev *vdev;

	/** Virtqueue exclusively assigned to this channel. */
	struct virtqueue *vq;

	/** Virtio response poller. */
	struct spdk_poller *poller;
};

/* Features desired/implemented by this driver. */
#define VIRTIO_BLK_DEV_SUPPORTED_FEATURES		\
	(1ULL << VIRTIO_BLK_F_SIZE_MAX		|	\
	 1ULL << VIRTIO_BLK_F_SEG_MAX		|	\
	 1ULL << VIRTIO_BLK_F_BLK_SIZE		|	\
	 1ULL << VIRTIO_BLK_F_TOPOLOGY		|	\
	 1ULL << VIRTIO_BLK_F_MQ		|	\
	 1ULL << VIRTIO_BLK_F_RO		|	\
	 1ULL << VIRTIO_BLK_F_DISCARD		|	\
	 1ULL << VIRTIO_RING_F_EVENT_IDX	|	\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

/* 10 sec for max poll period */
#define VIRTIO_BLK_HOTPLUG_POLL_PERIOD_MAX	10000000ULL
/* Default poll period is 100ms */
#define VIRTIO_BLK_HOTPLUG_POLL_PERIOD_DEFAULT	100000ULL

static struct spdk_poller *g_blk_hotplug_poller = NULL;
static int g_blk_hotplug_fd = -1;

static int bdev_virtio_initialize(void);
static int bdev_virtio_blk_get_ctx_size(void);

static struct spdk_bdev_module virtio_blk_if = {
	.name = "virtio_blk",
	.module_init = bdev_virtio_initialize,
	.get_ctx_size = bdev_virtio_blk_get_ctx_size,
};

SPDK_BDEV_MODULE_REGISTER(virtio_blk, &virtio_blk_if)

static int bdev_virtio_blk_ch_create_cb(void *io_device, void *ctx_buf);
static void bdev_virtio_blk_ch_destroy_cb(void *io_device, void *ctx_buf);

/* Prepare the per-I/O context: point the three iovecs at the request
 * header, status byte and discard descriptor embedded in the context,
 * and zero the request header. `ch` is currently unused.
 * Returns the initialized context (lives inside bdev_io->driver_ctx).
 */
static struct virtio_blk_io_ctx *
bdev_virtio_blk_init_io_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_blk_outhdr *req;
	uint8_t *resp;
	struct virtio_blk_discard_write_zeroes *desc;

	struct virtio_blk_io_ctx *io_ctx = (struct virtio_blk_io_ctx *)bdev_io->driver_ctx;

	req = &io_ctx->req;
	resp = &io_ctx->resp;
	desc = &io_ctx->unmap;

	io_ctx->iov_req.iov_base = req;
	io_ctx->iov_req.iov_len = sizeof(*req);

	io_ctx->iov_resp.iov_base = resp;
	io_ctx->iov_resp.iov_len = sizeof(*resp);

	io_ctx->iov_unmap.iov_base = desc;
	io_ctx->iov_unmap.iov_len = sizeof(*desc);

	/* Only the header needs zeroing; resp is overwritten by the device
	 * and the discard descriptor is fully filled in when used.
	 */
	memset(req, 0, sizeof(*req));
	return io_ctx;
}

/* Enqueue a prepared request on this channel's virtqueue.
 * Descriptor order is mandated by the virtio-blk protocol:
 * read-only header, then payload (or discard descriptor), then the
 * device-writable status byte. On -ENOMEM the I/O is completed with
 * NOMEM status so the bdev layer can retry it later.
 */
static void
bdev_virtio_blk_send_io(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_virtio_blk_io_channel *virtio_channel = spdk_io_channel_get_ctx(ch);
	struct virtqueue *vq = virtio_channel->vq;
	struct virtio_blk_io_ctx *io_ctx = (struct virtio_blk_io_ctx *)bdev_io->driver_ctx;
	int rc;

	/* +2 accounts for the header and the status descriptors. */
	rc = virtqueue_req_start(vq, bdev_io, bdev_io->u.bdev.iovcnt + 2);
	if (rc == -ENOMEM) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
		return;
	} else if (rc != 0) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP) {
		virtqueue_req_add_iovs(vq, &io_ctx->iov_unmap, 1, SPDK_VIRTIO_DESC_RO);
	} else {
		/* Reads are device-writable; writes are device-readable. */
		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				       bdev_io->type == SPDK_BDEV_IO_TYPE_READ ?
				       SPDK_VIRTIO_DESC_WR : SPDK_VIRTIO_DESC_RO);
	}
	virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);

	virtqueue_req_flush(vq);
}

/* Translate a bdev I/O into a virtio-blk request and submit it.
 * The bdev block size is a multiple of 512 (enforced at init time),
 * so sector offsets are computed in 512-byte units as virtio requires.
 */
static void
bdev_virtio_command(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_blk_io_ctx *io_ctx = bdev_virtio_blk_init_io_vreq(ch, bdev_io);
	struct virtio_blk_outhdr *req = &io_ctx->req;
	struct virtio_blk_discard_write_zeroes *desc = &io_ctx->unmap;

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		req->type = VIRTIO_BLK_T_IN;
	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		req->type = VIRTIO_BLK_T_OUT;
	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP) {
		req->type = VIRTIO_BLK_T_DISCARD;
		desc->sector = bdev_io->u.bdev.offset_blocks *
			       spdk_bdev_get_block_size(bdev_io->bdev) / 512;
		desc->num_sectors = bdev_io->u.bdev.num_blocks *
				    spdk_bdev_get_block_size(bdev_io->bdev) / 512;
		desc->flags = 0;
	}

	req->sector = bdev_io->u.bdev.offset_blocks *
		      spdk_bdev_get_block_size(bdev_io->bdev) / 512;

	bdev_virtio_blk_send_io(ch, bdev_io);
}

/* Buffer-allocation callback for reads; submits once the buffer exists. */
static void
bdev_virtio_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		       bool success)
{
	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	bdev_virtio_command(ch, bdev_io);
}

/* Dispatch one bdev I/O. Returns 0 when the I/O was handled (possibly
 * completed with failure), or -1 for unsupported I/O types so the caller
 * can fail the I/O.
 */
static int
_bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_blk_dev *bvdev = bdev_io->bdev->ctxt;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		/* Defer submission until a data buffer is available. */
		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		return 0;
	case SPDK_BDEV_IO_TYPE_WRITE:
		if (bvdev->readonly) {
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		} else {
			bdev_virtio_command(ch, bdev_io);
		}
		return 0;
	case SPDK_BDEV_IO_TYPE_RESET:
		/* No device-level reset; report success immediately. */
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
		return 0;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		if (bvdev->unmap) {
			bdev_virtio_command(ch, bdev_io);
		} else {
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
		return 0;
	case SPDK_BDEV_IO_TYPE_FLUSH:
	default:
		return -1;
	}

	SPDK_UNREACHABLE();
}

/* fn_table entry point: fail I/O types the dispatcher rejects. */
static void
bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	if (_bdev_virtio_submit_request(ch, bdev_io) < 0) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

/* fn_table entry point: advertise supported I/O types based on the
 * feature bits negotiated at device init time.
 */
static bool
bdev_virtio_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct virtio_blk_dev *bvdev = ctx;

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_RESET:
		return true;
	case SPDK_BDEV_IO_TYPE_WRITE:
		return !bvdev->readonly;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		return bvdev->unmap;
	case SPDK_BDEV_IO_TYPE_FLUSH:
	default:
		return false;
	}
}
271 static struct spdk_io_channel * 272 bdev_virtio_get_io_channel(void *ctx) 273 { 274 struct virtio_blk_dev *bvdev = ctx; 275 276 return spdk_get_io_channel(bvdev); 277 } 278 279 static void 280 virtio_blk_dev_unregister_cb(void *io_device) 281 { 282 struct virtio_blk_dev *bvdev = io_device; 283 struct virtio_dev *vdev = &bvdev->vdev; 284 285 virtio_dev_stop(vdev); 286 virtio_dev_destruct(vdev); 287 spdk_bdev_destruct_done(&bvdev->bdev, 0); 288 free(bvdev); 289 } 290 291 static int 292 bdev_virtio_disk_destruct(void *ctx) 293 { 294 struct virtio_blk_dev *bvdev = ctx; 295 296 spdk_io_device_unregister(bvdev, virtio_blk_dev_unregister_cb); 297 return 1; 298 } 299 300 int 301 bdev_virtio_blk_dev_remove(const char *name, bdev_virtio_remove_cb cb_fn, void *cb_arg) 302 { 303 return spdk_bdev_unregister_by_name(name, &virtio_blk_if, cb_fn, cb_arg); 304 } 305 306 static int 307 bdev_virtio_dump_json_config(void *ctx, struct spdk_json_write_ctx *w) 308 { 309 struct virtio_blk_dev *bvdev = ctx; 310 311 virtio_dev_dump_json_info(&bvdev->vdev, w); 312 return 0; 313 } 314 315 static void 316 bdev_virtio_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w) 317 { 318 struct virtio_blk_dev *bvdev = bdev->ctxt; 319 320 spdk_json_write_object_begin(w); 321 322 spdk_json_write_named_string(w, "method", "bdev_virtio_attach_controller"); 323 324 spdk_json_write_named_object_begin(w, "params"); 325 spdk_json_write_named_string(w, "name", bvdev->vdev.name); 326 spdk_json_write_named_string(w, "dev_type", "blk"); 327 328 /* Write transport specific parameters. 
*/ 329 bvdev->vdev.backend_ops->write_json_config(&bvdev->vdev, w); 330 331 spdk_json_write_object_end(w); 332 333 spdk_json_write_object_end(w); 334 } 335 336 static const struct spdk_bdev_fn_table virtio_fn_table = { 337 .destruct = bdev_virtio_disk_destruct, 338 .submit_request = bdev_virtio_submit_request, 339 .io_type_supported = bdev_virtio_io_type_supported, 340 .get_io_channel = bdev_virtio_get_io_channel, 341 .dump_info_json = bdev_virtio_dump_json_config, 342 .write_config_json = bdev_virtio_write_config_json, 343 }; 344 345 static void 346 bdev_virtio_io_cpl(struct spdk_bdev_io *bdev_io) 347 { 348 struct virtio_blk_io_ctx *io_ctx = (struct virtio_blk_io_ctx *)bdev_io->driver_ctx; 349 350 spdk_bdev_io_complete(bdev_io, io_ctx->resp == VIRTIO_BLK_S_OK ? 351 SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED); 352 } 353 354 static int 355 bdev_virtio_poll(void *arg) 356 { 357 struct bdev_virtio_blk_io_channel *ch = arg; 358 void *io[32]; 359 uint32_t io_len[32]; 360 uint16_t i, cnt; 361 362 cnt = virtio_recv_pkts(ch->vq, io, io_len, SPDK_COUNTOF(io)); 363 for (i = 0; i < cnt; ++i) { 364 bdev_virtio_io_cpl(io[i]); 365 } 366 367 return cnt; 368 } 369 370 static int 371 bdev_virtio_blk_ch_create_cb(void *io_device, void *ctx_buf) 372 { 373 struct virtio_blk_dev *bvdev = io_device; 374 struct virtio_dev *vdev = &bvdev->vdev; 375 struct bdev_virtio_blk_io_channel *ch = ctx_buf; 376 struct virtqueue *vq; 377 int32_t queue_idx; 378 379 queue_idx = virtio_dev_find_and_acquire_queue(vdev, 0); 380 if (queue_idx < 0) { 381 SPDK_ERRLOG("Couldn't get an unused queue for the io_channel.\n"); 382 return -1; 383 } 384 385 vq = vdev->vqs[queue_idx]; 386 387 ch->vdev = vdev; 388 ch->vq = vq; 389 390 ch->poller = SPDK_POLLER_REGISTER(bdev_virtio_poll, ch, 0); 391 return 0; 392 } 393 394 static void 395 bdev_virtio_blk_ch_destroy_cb(void *io_device, void *ctx_buf) 396 { 397 struct virtio_blk_dev *bvdev = io_device; 398 struct virtio_dev *vdev = &bvdev->vdev; 399 struct 
bdev_virtio_blk_io_channel *ch = ctx_buf; 400 struct virtqueue *vq = ch->vq; 401 402 spdk_poller_unregister(&ch->poller); 403 virtio_dev_release_queue(vdev, vq->vq_queue_index); 404 } 405 406 static int 407 virtio_blk_dev_init(struct virtio_blk_dev *bvdev, uint16_t max_queues) 408 { 409 struct virtio_dev *vdev = &bvdev->vdev; 410 struct spdk_bdev *bdev = &bvdev->bdev; 411 uint64_t capacity, num_blocks; 412 uint32_t block_size, size_max, seg_max; 413 uint16_t host_max_queues; 414 int rc; 415 416 if (virtio_dev_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE)) { 417 rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_blk_config, blk_size), 418 &block_size, sizeof(block_size)); 419 if (rc) { 420 SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc)); 421 return rc; 422 } 423 424 if (block_size == 0 || block_size % 512 != 0) { 425 SPDK_ERRLOG("%s: invalid block size (%"PRIu32"). Must be " 426 "a multiple of 512.\n", vdev->name, block_size); 427 return -EIO; 428 } 429 } else { 430 block_size = 512; 431 } 432 433 rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_blk_config, capacity), 434 &capacity, sizeof(capacity)); 435 if (rc) { 436 SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc)); 437 return rc; 438 } 439 440 /* `capacity` is a number of 512-byte sectors. */ 441 num_blocks = capacity * 512 / block_size; 442 if (num_blocks == 0) { 443 SPDK_ERRLOG("%s: size too small (size: %"PRIu64", blocksize: %"PRIu32").\n", 444 vdev->name, capacity * 512, block_size); 445 return -EIO; 446 } 447 448 if ((capacity * 512) % block_size != 0) { 449 SPDK_WARNLOG("%s: size has been rounded down to the nearest block size boundary. 
" 450 "(block size: %"PRIu32", previous size: %"PRIu64", new size: %"PRIu64")\n", 451 vdev->name, block_size, capacity * 512, num_blocks * block_size); 452 } 453 454 if (virtio_dev_has_feature(vdev, VIRTIO_BLK_F_MQ)) { 455 rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_blk_config, num_queues), 456 &host_max_queues, sizeof(host_max_queues)); 457 if (rc) { 458 SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc)); 459 return rc; 460 } 461 } else { 462 host_max_queues = 1; 463 } 464 465 if (virtio_dev_has_feature(vdev, VIRTIO_BLK_F_SIZE_MAX)) { 466 rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_blk_config, size_max), 467 &size_max, sizeof(size_max)); 468 if (rc) { 469 SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc)); 470 return rc; 471 } 472 473 if (spdk_unlikely(size_max < block_size)) { 474 SPDK_WARNLOG("%s: minimum segment size is set to block size %u forcefully.\n", 475 vdev->name, block_size); 476 size_max = block_size; 477 } 478 479 bdev->max_segment_size = size_max; 480 } 481 482 if (virtio_dev_has_feature(vdev, VIRTIO_BLK_F_SEG_MAX)) { 483 rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_blk_config, seg_max), 484 &seg_max, sizeof(seg_max)); 485 if (rc) { 486 SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc)); 487 return rc; 488 } 489 490 if (spdk_unlikely(seg_max == 0)) { 491 SPDK_ERRLOG("%s: virtio blk SEG_MAX can't be 0\n", vdev->name); 492 return -EINVAL; 493 } 494 495 bdev->max_num_segments = seg_max; 496 } 497 498 if (virtio_dev_has_feature(vdev, VIRTIO_BLK_F_RO)) { 499 bvdev->readonly = true; 500 } 501 502 if (virtio_dev_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) { 503 bvdev->unmap = true; 504 } 505 506 if (max_queues == 0) { 507 SPDK_ERRLOG("%s: requested 0 request queues (%"PRIu16" available).\n", 508 vdev->name, host_max_queues); 509 return -EINVAL; 510 } 511 512 if (max_queues > host_max_queues) { 513 SPDK_WARNLOG("%s: requested %"PRIu16" 
request queues " 514 "but only %"PRIu16" available.\n", 515 vdev->name, max_queues, host_max_queues); 516 max_queues = host_max_queues; 517 } 518 519 /* bdev is tied with the virtio device; we can reuse the name */ 520 bdev->name = vdev->name; 521 rc = virtio_dev_start(vdev, max_queues, 0); 522 if (rc != 0) { 523 return rc; 524 } 525 526 bdev->product_name = "VirtioBlk Disk"; 527 bdev->write_cache = 0; 528 bdev->blocklen = block_size; 529 bdev->blockcnt = num_blocks; 530 531 bdev->ctxt = bvdev; 532 bdev->fn_table = &virtio_fn_table; 533 bdev->module = &virtio_blk_if; 534 535 spdk_io_device_register(bvdev, bdev_virtio_blk_ch_create_cb, 536 bdev_virtio_blk_ch_destroy_cb, 537 sizeof(struct bdev_virtio_blk_io_channel), 538 vdev->name); 539 540 rc = spdk_bdev_register(bdev); 541 if (rc) { 542 SPDK_ERRLOG("Failed to register bdev name=%s\n", bdev->name); 543 spdk_io_device_unregister(bvdev, NULL); 544 virtio_dev_stop(vdev); 545 return rc; 546 } 547 548 return 0; 549 } 550 551 static struct virtio_blk_dev * 552 virtio_pci_blk_dev_create(const char *name, struct virtio_pci_ctx *pci_ctx) 553 { 554 static int pci_dev_counter = 0; 555 struct virtio_blk_dev *bvdev; 556 struct virtio_dev *vdev; 557 char *default_name = NULL; 558 uint16_t num_queues; 559 int rc; 560 561 bvdev = calloc(1, sizeof(*bvdev)); 562 if (bvdev == NULL) { 563 SPDK_ERRLOG("virtio device calloc failed\n"); 564 return NULL; 565 } 566 vdev = &bvdev->vdev; 567 568 if (name == NULL) { 569 default_name = spdk_sprintf_alloc("VirtioBlk%"PRIu32, pci_dev_counter++); 570 if (default_name == NULL) { 571 free(vdev); 572 return NULL; 573 } 574 name = default_name; 575 } 576 577 rc = virtio_pci_dev_init(vdev, name, pci_ctx); 578 free(default_name); 579 580 if (rc != 0) { 581 free(bvdev); 582 return NULL; 583 } 584 585 rc = virtio_dev_reset(vdev, VIRTIO_BLK_DEV_SUPPORTED_FEATURES); 586 if (rc != 0) { 587 goto fail; 588 } 589 590 /* TODO: add a way to limit usable virtqueues */ 591 if (virtio_dev_has_feature(vdev, 
VIRTIO_BLK_F_MQ)) { 592 rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_blk_config, num_queues), 593 &num_queues, sizeof(num_queues)); 594 if (rc) { 595 SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc)); 596 goto fail; 597 } 598 } else { 599 num_queues = 1; 600 } 601 602 rc = virtio_blk_dev_init(bvdev, num_queues); 603 if (rc != 0) { 604 goto fail; 605 } 606 607 return bvdev; 608 609 fail: 610 vdev->ctx = NULL; 611 virtio_dev_destruct(vdev); 612 free(bvdev); 613 return NULL; 614 } 615 616 static struct virtio_blk_dev * 617 virtio_user_blk_dev_create(const char *name, const char *path, 618 uint16_t num_queues, uint32_t queue_size) 619 { 620 struct virtio_blk_dev *bvdev; 621 int rc; 622 623 bvdev = calloc(1, sizeof(*bvdev)); 624 if (bvdev == NULL) { 625 SPDK_ERRLOG("calloc failed for virtio device %s: %s\n", name, path); 626 return NULL; 627 } 628 629 rc = virtio_user_dev_init(&bvdev->vdev, name, path, queue_size); 630 if (rc != 0) { 631 SPDK_ERRLOG("Failed to create virito device %s: %s\n", name, path); 632 free(bvdev); 633 return NULL; 634 } 635 636 rc = virtio_dev_reset(&bvdev->vdev, VIRTIO_BLK_DEV_SUPPORTED_FEATURES); 637 if (rc != 0) { 638 virtio_dev_destruct(&bvdev->vdev); 639 free(bvdev); 640 return NULL; 641 } 642 643 rc = virtio_blk_dev_init(bvdev, num_queues); 644 if (rc != 0) { 645 virtio_dev_destruct(&bvdev->vdev); 646 free(bvdev); 647 return NULL; 648 } 649 650 return bvdev; 651 } 652 653 struct bdev_virtio_pci_dev_create_ctx { 654 const char *name; 655 struct virtio_blk_dev *ret; 656 }; 657 658 static int 659 bdev_virtio_pci_blk_dev_create_cb(struct virtio_pci_ctx *pci_ctx, void *ctx) 660 { 661 struct bdev_virtio_pci_dev_create_ctx *create_ctx = ctx; 662 663 create_ctx->ret = virtio_pci_blk_dev_create(create_ctx->name, pci_ctx); 664 if (create_ctx->ret == NULL) { 665 return -1; 666 } 667 668 return 0; 669 } 670 671 struct spdk_bdev * 672 bdev_virtio_pci_blk_dev_create(const char *name, struct spdk_pci_addr 
*pci_addr) 673 { 674 struct bdev_virtio_pci_dev_create_ctx create_ctx; 675 676 create_ctx.name = name; 677 create_ctx.ret = NULL; 678 679 virtio_pci_dev_attach(bdev_virtio_pci_blk_dev_create_cb, &create_ctx, 680 VIRTIO_ID_BLOCK, pci_addr); 681 682 if (create_ctx.ret == NULL) { 683 return NULL; 684 } 685 686 return &create_ctx.ret->bdev; 687 } 688 689 static int 690 bdev_virtio_pci_blk_monitor(void *arg) 691 { 692 const char *vdev_name; 693 struct bdev_virtio_pci_dev_create_ctx create_ctx; 694 695 while ((vdev_name = virtio_pci_dev_event_process(g_blk_hotplug_fd, VIRTIO_ID_BLOCK)) != NULL) { 696 bdev_virtio_blk_dev_remove(vdev_name, NULL, NULL); 697 } 698 699 /* Enumerate virtio pci_blk device */ 700 memset(&create_ctx, 0, sizeof(create_ctx)); 701 virtio_pci_dev_enumerate(bdev_virtio_pci_blk_dev_create_cb, &create_ctx, 702 VIRTIO_ID_BLOCK); 703 704 return SPDK_POLLER_BUSY; 705 } 706 707 int 708 bdev_virtio_pci_blk_set_hotplug(bool enabled, uint64_t period_us) 709 { 710 if (enabled == true && !spdk_process_is_primary()) { 711 return -EPERM; 712 } 713 714 if (g_blk_hotplug_poller) { 715 close(g_blk_hotplug_fd); 716 spdk_poller_unregister(&g_blk_hotplug_poller); 717 } 718 719 if (!enabled) { 720 return 0; 721 } 722 723 g_blk_hotplug_fd = spdk_pci_event_listen(); 724 if (g_blk_hotplug_fd < 0) { 725 return g_blk_hotplug_fd; 726 } 727 728 period_us = period_us ? 
period_us : VIRTIO_BLK_HOTPLUG_POLL_PERIOD_DEFAULT; 729 period_us = spdk_min(period_us, VIRTIO_BLK_HOTPLUG_POLL_PERIOD_MAX); 730 g_blk_hotplug_poller = spdk_poller_register(bdev_virtio_pci_blk_monitor, NULL, period_us); 731 if (!g_blk_hotplug_poller) { 732 close(g_blk_hotplug_fd); 733 return -1; 734 } 735 736 return 0; 737 } 738 739 static int 740 bdev_virtio_initialize(void) 741 { 742 return 0; 743 } 744 745 struct spdk_bdev * 746 bdev_virtio_user_blk_dev_create(const char *name, const char *path, 747 unsigned num_queues, unsigned queue_size) 748 { 749 struct virtio_blk_dev *bvdev; 750 751 bvdev = virtio_user_blk_dev_create(name, path, num_queues, queue_size); 752 if (bvdev == NULL) { 753 return NULL; 754 } 755 756 return &bvdev->bdev; 757 } 758 759 static int 760 bdev_virtio_blk_get_ctx_size(void) 761 { 762 return sizeof(struct virtio_blk_io_ctx); 763 } 764 765 SPDK_LOG_REGISTER_COMPONENT(virtio_blk) 766