/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/conf.h"
#include "spdk/endian.h"
#include "spdk/env.h"
#include "spdk/thread.h"
#include "spdk/scsi_spec.h"
#include "spdk/string.h"
#include "spdk/util.h"
#include "spdk/json.h"

#include "spdk/bdev_module.h"
#include "spdk_internal/log.h"
#include "spdk_internal/virtio.h"

#include <linux/virtio_scsi.h>

#include "bdev_virtio.h"

#define BDEV_VIRTIO_MAX_TARGET		64
#define BDEV_VIRTIO_SCAN_PAYLOAD_SIZE	256
#define MGMT_POLL_PERIOD_US		(1000 * 5)
#define CTRLQ_RING_SIZE			16
#define SCAN_REQUEST_RETRIES		5

/* Number of non-request queues - eventq and controlq */
#define SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED 2

#define VIRTIO_SCSI_EVENTQ_BUFFER_COUNT 16

#define VIRTIO_SCSI_CONTROLQ	0
#define VIRTIO_SCSI_EVENTQ	1
#define VIRTIO_SCSI_REQUESTQ	2

static int bdev_virtio_initialize(void);
static void bdev_virtio_finish(void);

struct virtio_scsi_dev {
	/* Generic virtio device data. */
	struct virtio_dev vdev;

	/** Detected SCSI LUNs */
	TAILQ_HEAD(, virtio_scsi_disk) luns;

	/** Context for the SCSI target scan. */
	struct virtio_scsi_scan_base *scan_ctx;

	/** Controlq poller. */
	struct spdk_poller *mgmt_poller;

	/** Controlq messages to be sent. */
	struct spdk_ring *ctrlq_ring;

	/** Buffers for the eventq. */
	struct virtio_scsi_eventq_io *eventq_ios;

	/** Device marked for removal. */
	bool removed;

	/** Callback to be called after vdev removal. */
	bdev_virtio_remove_cb remove_cb;

	/** Context for the `remove_cb`. */
	void *remove_ctx;

	TAILQ_ENTRY(virtio_scsi_dev) tailq;
};
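
/**
 * Per-I/O context stored in the spdk_bdev_io driver context. It holds the
 * virtio-scsi request/response headers (or their task-management variants)
 * together with the iovecs that describe them to the virtqueue.
 */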
struct virtio_scsi_io_ctx {
	struct iovec iov_req;
	struct iovec iov_resp;
	union {
		struct virtio_scsi_cmd_req req;
		struct virtio_scsi_ctrl_tmf_req tmf_req;
	};
	union {
		struct virtio_scsi_cmd_resp resp;
		struct virtio_scsi_ctrl_tmf_resp tmf_resp;
	};
};

struct virtio_scsi_eventq_io {
	struct iovec iov;
	struct virtio_scsi_event ev;
};

struct virtio_scsi_scan_info {
	uint64_t num_blocks;
	uint32_t block_size;
	uint8_t target;
	bool unmap_supported;
	TAILQ_ENTRY(virtio_scsi_scan_info) tailq;
};

struct virtio_scsi_scan_base {
	struct virtio_scsi_dev *svdev;

	/** I/O channel used for the scan I/O. */
	struct bdev_virtio_io_channel *channel;

	bdev_virtio_create_cb cb_fn;
	void *cb_arg;

	/** Scan all targets on the device. */
	bool full_scan;

	/** Start a full rescan after receiving next scan I/O response. */
	bool restart;

	/** Additional targets to be (re)scanned. */
	TAILQ_HEAD(, virtio_scsi_scan_info) scan_queue;

	/** Remaining attempts for sending the current request. */
	unsigned retries;

	/** If set, the last scan I/O needs to be resent */
	bool needs_resend;

	struct virtio_scsi_io_ctx io_ctx;
	struct iovec iov;
	uint8_t payload[BDEV_VIRTIO_SCAN_PAYLOAD_SIZE];

	/** Scan results for the current target. */
	struct virtio_scsi_scan_info info;
};

struct virtio_scsi_disk {
	struct spdk_bdev bdev;
	struct virtio_scsi_dev *svdev;
	struct virtio_scsi_scan_info info;

	/** Descriptor opened just to be notified of external bdev hotremove. */
	struct spdk_bdev_desc *notify_desc;

	/** Disk marked for removal. */
	bool removed;
	TAILQ_ENTRY(virtio_scsi_disk) link;
};

struct bdev_virtio_io_channel {
	struct virtio_scsi_dev *svdev;

	/** Virtqueue exclusively assigned to this channel. */
	struct virtqueue *vq;

	/** Virtio response poller. */
	struct spdk_poller *poller;
};

static TAILQ_HEAD(, virtio_scsi_dev) g_virtio_scsi_devs =
	TAILQ_HEAD_INITIALIZER(g_virtio_scsi_devs);

static pthread_mutex_t g_virtio_scsi_mutex = PTHREAD_MUTEX_INITIALIZER;

/** Module finish in progress */
static bool g_bdev_virtio_finish = false;

/* Features desired/implemented by this driver. */
#define VIRTIO_SCSI_DEV_SUPPORTED_FEATURES		\
	(1ULL << VIRTIO_SCSI_F_INOUT |			\
	 1ULL << VIRTIO_SCSI_F_HOTPLUG |		\
	 1ULL << VIRTIO_RING_F_EVENT_IDX |		\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

static void virtio_scsi_dev_unregister_cb(void *io_device);
static void virtio_scsi_dev_remove(struct virtio_scsi_dev *svdev,
				   bdev_virtio_remove_cb cb_fn, void *cb_arg);
static int bdev_virtio_scsi_ch_create_cb(void *io_device, void *ctx_buf);
static void bdev_virtio_scsi_ch_destroy_cb(void *io_device, void *ctx_buf);
static void process_scan_resp(struct virtio_scsi_scan_base *base);
static int bdev_virtio_mgmt_poll(void *arg);

static int
virtio_scsi_dev_send_eventq_io(struct virtqueue *vq, struct virtio_scsi_eventq_io *io)
{
	int rc;

	rc = virtqueue_req_start(vq, io, 1);
	if (rc != 0) {
		return -1;
	}

	virtqueue_req_add_iovs(vq, &io->iov, 1, SPDK_VIRTIO_DESC_WR);
	virtqueue_req_flush(vq);

	return 0;
}

static int
virtio_scsi_dev_init(struct virtio_scsi_dev *svdev, uint16_t max_queues)
{
	struct virtio_dev *vdev = &svdev->vdev;
	struct spdk_ring *ctrlq_ring;
	struct virtio_scsi_eventq_io *eventq_io;
	struct virtqueue *eventq;
	uint16_t i, num_events;
	int rc;

	rc = virtio_dev_reset(vdev, VIRTIO_SCSI_DEV_SUPPORTED_FEATURES);
	if (rc != 0) {
		return rc;
	}

	rc = virtio_dev_start(vdev, max_queues, SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED);
	if (rc != 0) {
		return rc;
	}

	ctrlq_ring = spdk_ring_create(SPDK_RING_TYPE_MP_SC, CTRLQ_RING_SIZE,
				      SPDK_ENV_SOCKET_ID_ANY);
	if (ctrlq_ring == NULL) {
		SPDK_ERRLOG("Failed to allocate send ring for the controlq.\n");
		return -1;
	}

	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_CONTROLQ);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to acquire the controlq.\n");
		spdk_ring_free(ctrlq_ring);
		return -1;
	}

	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_EVENTQ);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to acquire the eventq.\n");
		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
		spdk_ring_free(ctrlq_ring);
		return -1;
	}

	eventq = vdev->vqs[VIRTIO_SCSI_EVENTQ];
	num_events = spdk_min(eventq->vq_nentries, VIRTIO_SCSI_EVENTQ_BUFFER_COUNT);
	svdev->eventq_ios = spdk_zmalloc(sizeof(*svdev->eventq_ios) * num_events,
					 0, NULL, SPDK_ENV_LCORE_ID_ANY,
					 SPDK_MALLOC_DMA);
	if (svdev->eventq_ios == NULL) {
		SPDK_ERRLOG("cannot allocate memory for %"PRIu16" eventq buffers\n",
			    num_events);
		virtio_dev_release_queue(vdev, VIRTIO_SCSI_EVENTQ);
		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
		spdk_ring_free(ctrlq_ring);
		return -1;
	}

	for (i = 0; i < num_events; i++) {
		eventq_io = &svdev->eventq_ios[i];
		eventq_io->iov.iov_base = &eventq_io->ev;
		eventq_io->iov.iov_len = sizeof(eventq_io->ev);
		virtio_scsi_dev_send_eventq_io(eventq, eventq_io);
	}

	svdev->ctrlq_ring = ctrlq_ring;

	svdev->mgmt_poller = spdk_poller_register(bdev_virtio_mgmt_poll, svdev,
			     MGMT_POLL_PERIOD_US);

	TAILQ_INIT(&svdev->luns);
	svdev->scan_ctx = NULL;
	svdev->removed = false;
	svdev->remove_cb = NULL;
	svdev->remove_ctx = NULL;

	spdk_io_device_register(svdev, bdev_virtio_scsi_ch_create_cb,
				bdev_virtio_scsi_ch_destroy_cb,
				sizeof(struct bdev_virtio_io_channel),
				svdev->vdev.name);

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_INSERT_TAIL(&g_virtio_scsi_devs, svdev, tailq);
	pthread_mutex_unlock(&g_virtio_scsi_mutex);
	return 0;
}
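
/**
 * Create a virtio-scsi device on top of a probed virtio-pci transport.
 * If no name is given, an automatically numbered "VirtioScsiN" name is used.
 */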
static struct virtio_scsi_dev *
virtio_pci_scsi_dev_create(const char *name, struct virtio_pci_ctx *pci_ctx)
{
	static int pci_dev_counter = 0;
	struct virtio_scsi_dev *svdev;
	struct virtio_dev *vdev;
	char *default_name = NULL;
	uint32_t num_queues;
	int rc;

	svdev = calloc(1, sizeof(*svdev));
	if (svdev == NULL) {
		SPDK_ERRLOG("virtio device calloc failed\n");
		return NULL;
	}

	vdev = &svdev->vdev;
	if (name == NULL) {
		default_name = spdk_sprintf_alloc("VirtioScsi%"PRIu32, pci_dev_counter++);
		if (default_name == NULL) {
			free(svdev);
			return NULL;
		}
		name = default_name;
	}

	rc = virtio_pci_dev_init(vdev, name, pci_ctx);
	free(default_name);

	if (rc != 0) {
		free(svdev);
		return NULL;
	}

	rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_scsi_config, num_queues),
					&num_queues, sizeof(num_queues));
	if (rc) {
		SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc));
		virtio_dev_destruct(vdev);
		free(svdev);
		return NULL;
	}

	rc = virtio_scsi_dev_init(svdev, num_queues);
	if (rc != 0) {
		virtio_dev_destruct(vdev);
		free(svdev);
		return NULL;
	}

	return svdev;
}

static struct virtio_scsi_dev *
virtio_user_scsi_dev_create(const char *name, const char *path,
			    uint16_t num_queues, uint32_t queue_size)
{
	struct virtio_scsi_dev *svdev;
	struct virtio_dev *vdev;
	int rc;

	svdev = calloc(1, sizeof(*svdev));
	if (svdev == NULL) {
		SPDK_ERRLOG("calloc failed for virtio device %s: %s\n", name, path);
		return NULL;
	}

	vdev = &svdev->vdev;
	rc = virtio_user_dev_init(vdev, name, path, queue_size);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create virtio device %s: %s\n", name, path);
		free(svdev);
		return NULL;
	}

	rc = virtio_scsi_dev_init(svdev, num_queues);
	if (rc != 0) {
		virtio_dev_destruct(vdev);
		free(svdev);
		return NULL;
	}

	return svdev;
}

static struct virtio_scsi_disk *
virtio_scsi_dev_get_disk_by_id(struct virtio_scsi_dev *svdev, uint8_t target_id)
{
	struct virtio_scsi_disk *disk;

	TAILQ_FOREACH(disk, &svdev->luns, link) {
		if (disk->info.target == target_id) {
			return disk;
		}
	}

	return NULL;
}

static int virtio_scsi_dev_scan(struct virtio_scsi_dev *svdev,
				bdev_virtio_create_cb cb_fn, void *cb_arg);
static int send_scan_io(struct virtio_scsi_scan_base *base);
static void _virtio_scsi_dev_scan_tgt(struct virtio_scsi_scan_base *base, uint8_t target);
static int _virtio_scsi_dev_scan_next(struct virtio_scsi_scan_base *base, int rc);
static void _virtio_scsi_dev_scan_finish(struct virtio_scsi_scan_base *base, int errnum);
static int virtio_scsi_dev_scan_tgt(struct virtio_scsi_dev *svdev, uint8_t target);

static int
bdev_virtio_get_ctx_size(void)
{
	return sizeof(struct virtio_scsi_io_ctx);
}
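
/**
 * Dump the JSON-RPC configuration needed to recreate all attached
 * controllers: one bdev_virtio_attach_controller entry per device, with
 * transport-specific parameters filled in by the virtio backend.
 */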
static int
bdev_virtio_scsi_config_json(struct spdk_json_write_ctx *w)
{
	struct virtio_scsi_dev *svdev;

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		spdk_json_write_object_begin(w);

		spdk_json_write_named_string(w, "method", "bdev_virtio_attach_controller");

		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "name", svdev->vdev.name);
		spdk_json_write_named_string(w, "dev_type", "scsi");

		/* Write transport specific parameters. */
		svdev->vdev.backend_ops->write_json_config(&svdev->vdev, w);

		spdk_json_write_object_end(w);

		spdk_json_write_object_end(w);
	}
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	return 0;
}

static struct spdk_bdev_module virtio_scsi_if = {
	.name = "virtio_scsi",
	.module_init = bdev_virtio_initialize,
	.module_fini = bdev_virtio_finish,
	.get_ctx_size = bdev_virtio_get_ctx_size,
	.config_json = bdev_virtio_scsi_config_json,
	.async_init = true,
	.async_fini = true,
};

SPDK_BDEV_MODULE_REGISTER(virtio_scsi, &virtio_scsi_if)

static struct virtio_scsi_io_ctx *
bdev_virtio_init_io_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_cmd_req *req;
	struct virtio_scsi_cmd_resp *resp;
	struct virtio_scsi_disk *disk = (struct virtio_scsi_disk *)bdev_io->bdev;
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;

	req = &io_ctx->req;
	resp = &io_ctx->resp;

	io_ctx->iov_req.iov_base = req;
	io_ctx->iov_req.iov_len = sizeof(*req);

	io_ctx->iov_resp.iov_base = resp;
	io_ctx->iov_resp.iov_len = sizeof(*resp);

	memset(req, 0, sizeof(*req));
	req->lun[0] = 1;
	req->lun[1] = disk->info.target;

	return io_ctx;
}

static struct virtio_scsi_io_ctx *
bdev_virtio_init_tmf_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;

	tmf_req = &io_ctx->tmf_req;
	tmf_resp = &io_ctx->tmf_resp;

	io_ctx->iov_req.iov_base = tmf_req;
	io_ctx->iov_req.iov_len = sizeof(*tmf_req);
	io_ctx->iov_resp.iov_base = tmf_resp;
	io_ctx->iov_resp.iov_len = sizeof(*tmf_resp);

	memset(tmf_req, 0, sizeof(*tmf_req));
	tmf_req->lun[0] = 1;
	tmf_req->lun[1] = disk->info.target;

	return io_ctx;
}

static void
bdev_virtio_send_io(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_virtio_io_channel *virtio_channel = spdk_io_channel_get_ctx(ch);
	struct virtqueue *vq = virtio_channel->vq;
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
	int rc;

	rc = virtqueue_req_start(vq, bdev_io, bdev_io->u.bdev.iovcnt + 2);
	if (rc == -ENOMEM) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
		return;
	} else if (rc != 0) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				       SPDK_VIRTIO_DESC_WR);
	} else {
		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				       SPDK_VIRTIO_DESC_RO);
		virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
	}

	virtqueue_req_flush(vq);
}
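
/**
 * Build the READ/WRITE CDB for a bdev I/O. 10-byte CDBs are used when the
 * target is small enough to be addressed with 32-bit LBAs; otherwise the
 * 16-byte variants are used.
 */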
static void
bdev_virtio_rw(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_io_vreq(ch, bdev_io);
	struct virtio_scsi_cmd_req *req = &io_ctx->req;
	bool is_write = bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE;

	if (disk->info.num_blocks > (1ULL << 32)) {
		req->cdb[0] = is_write ? SPDK_SBC_WRITE_16 : SPDK_SBC_READ_16;
		to_be64(&req->cdb[2], bdev_io->u.bdev.offset_blocks);
		to_be32(&req->cdb[10], bdev_io->u.bdev.num_blocks);
	} else {
		req->cdb[0] = is_write ? SPDK_SBC_WRITE_10 : SPDK_SBC_READ_10;
		to_be32(&req->cdb[2], bdev_io->u.bdev.offset_blocks);
		to_be16(&req->cdb[7], bdev_io->u.bdev.num_blocks);
	}

	bdev_virtio_send_io(ch, bdev_io);
}

static void
bdev_virtio_reset(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_virtio_io_channel *virtio_ch = spdk_io_channel_get_ctx(ch);
	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_tmf_vreq(ch, bdev_io);
	struct virtio_scsi_ctrl_tmf_req *tmf_req = &io_ctx->tmf_req;
	struct virtio_scsi_dev *svdev = virtio_ch->svdev;
	size_t enqueued_count;

	tmf_req->type = VIRTIO_SCSI_T_TMF;
	tmf_req->subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;

	enqueued_count = spdk_ring_enqueue(svdev->ctrlq_ring, (void **)&bdev_io, 1, NULL);
	if (spdk_likely(enqueued_count == 1)) {
		return;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

static void
bdev_virtio_unmap(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
{
	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_io_vreq(ch, bdev_io);
	struct virtio_scsi_cmd_req *req = &io_ctx->req;
	struct spdk_scsi_unmap_bdesc *desc, *first_desc;
	uint8_t *buf;
	uint64_t offset_blocks, num_blocks;
	uint16_t cmd_len;

	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	buf = bdev_io->u.bdev.iovs[0].iov_base;

	offset_blocks = bdev_io->u.bdev.offset_blocks;
	num_blocks = bdev_io->u.bdev.num_blocks;

	/* (n-1) * 16-byte descriptors */
	first_desc = desc = (struct spdk_scsi_unmap_bdesc *)&buf[8];
	while (num_blocks > UINT32_MAX) {
		to_be64(&desc->lba, offset_blocks);
		to_be32(&desc->block_count, UINT32_MAX);
		memset(&desc->reserved, 0, sizeof(desc->reserved));
		offset_blocks += UINT32_MAX;
		num_blocks -= UINT32_MAX;
		desc++;
	}

	/* The last descriptor with block_count <= UINT32_MAX */
	to_be64(&desc->lba, offset_blocks);
	to_be32(&desc->block_count, num_blocks);
	memset(&desc->reserved, 0, sizeof(desc->reserved));

	/* 8-byte header + n * 16-byte block descriptor */
	cmd_len = 8 + (desc - first_desc + 1) * sizeof(struct spdk_scsi_unmap_bdesc);

	req->cdb[0] = SPDK_SBC_UNMAP;
	to_be16(&req->cdb[7], cmd_len);

	/* 8-byte header */
	to_be16(&buf[0], cmd_len - 2); /* total length (excluding the length field) */
	to_be16(&buf[2], cmd_len - 8); /* length of block descriptors */
	memset(&buf[4], 0, 4); /* reserved */

	bdev_virtio_send_io(ch, bdev_io);
}

static void
bdev_virtio_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		       bool success)
{
	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	bdev_virtio_rw(ch, bdev_io);
}
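
/**
 * Route a bdev I/O to the matching SCSI command builder. Returns -1 for
 * unsupported or invalid requests so the caller can fail the I/O.
 */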
static int
_bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		return 0;
	case SPDK_BDEV_IO_TYPE_WRITE:
		bdev_virtio_rw(ch, bdev_io);
		return 0;
	case SPDK_BDEV_IO_TYPE_RESET:
		bdev_virtio_reset(ch, bdev_io);
		return 0;
	case SPDK_BDEV_IO_TYPE_UNMAP: {
		uint64_t buf_len = 8 /* header size */ +
				   (bdev_io->u.bdev.num_blocks + UINT32_MAX - 1) /
				   UINT32_MAX * sizeof(struct spdk_scsi_unmap_bdesc);

		if (!disk->info.unmap_supported) {
			return -1;
		}

		if (buf_len > SPDK_BDEV_LARGE_BUF_MAX_SIZE) {
			SPDK_ERRLOG("Trying to UNMAP too many blocks: %"PRIu64"\n",
				    bdev_io->u.bdev.num_blocks);
			return -1;
		}
		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_unmap, buf_len);
		return 0;
	}
	case SPDK_BDEV_IO_TYPE_FLUSH:
	default:
		return -1;
	}
	return 0;
}

static void
bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	if (_bdev_virtio_submit_request(ch, bdev_io) < 0) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static bool
bdev_virtio_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct virtio_scsi_disk *disk = ctx;

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_WRITE:
	case SPDK_BDEV_IO_TYPE_FLUSH:
	case SPDK_BDEV_IO_TYPE_RESET:
		return true;

	case SPDK_BDEV_IO_TYPE_UNMAP:
		return disk->info.unmap_supported;

	default:
		return false;
	}
}

static struct spdk_io_channel *
bdev_virtio_get_io_channel(void *ctx)
{
	struct virtio_scsi_disk *disk = ctx;

	return spdk_get_io_channel(disk->svdev);
}

static int
bdev_virtio_disk_destruct(void *ctx)
{
	struct virtio_scsi_disk *disk = ctx;
	struct virtio_scsi_dev *svdev = disk->svdev;

	TAILQ_REMOVE(&svdev->luns, disk, link);
	free(disk->bdev.name);
	free(disk);

	if (svdev->removed && TAILQ_EMPTY(&svdev->luns)) {
		spdk_io_device_unregister(svdev, virtio_scsi_dev_unregister_cb);
	}

	return 0;
}

static int
bdev_virtio_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
	struct virtio_scsi_disk *disk = ctx;

	virtio_dev_dump_json_info(&disk->svdev->vdev, w);
	return 0;
}

static void
bdev_virtio_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	/* SCSI targets and LUNs are discovered during the scan process, so there
	 * is nothing to save here.
	 */
}

static const struct spdk_bdev_fn_table virtio_fn_table = {
	.destruct		= bdev_virtio_disk_destruct,
	.submit_request		= bdev_virtio_submit_request,
	.io_type_supported	= bdev_virtio_io_type_supported,
	.get_io_channel		= bdev_virtio_get_io_channel,
	.dump_info_json		= bdev_virtio_dump_info_json,
	.write_config_json	= bdev_virtio_write_config_json,
};
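
/**
 * Extract the sense key, ASC and ASCQ from fixed-format sense data in the
 * virtio-scsi response. Missing fields are reported as 0.
 */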
static void
get_scsi_status(struct virtio_scsi_cmd_resp *resp, int *sk, int *asc, int *ascq)
{
	/* see spdk_scsi_task_build_sense_data() for sense data details */
	*sk = 0;
	*asc = 0;
	*ascq = 0;

	if (resp->sense_len < 3) {
		return;
	}

	*sk = resp->sense[2] & 0xf;

	if (resp->sense_len < 13) {
		return;
	}

	*asc = resp->sense[12];

	if (resp->sense_len < 14) {
		return;
	}

	*ascq = resp->sense[13];
}

static void
bdev_virtio_io_cpl(struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
	int sk, asc, ascq;

	get_scsi_status(&io_ctx->resp, &sk, &asc, &ascq);
	spdk_bdev_io_complete_scsi_status(bdev_io, io_ctx->resp.status, sk, asc, ascq);
}

static int
bdev_virtio_poll(void *arg)
{
	struct bdev_virtio_io_channel *ch = arg;
	struct virtio_scsi_dev *svdev = ch->svdev;
	struct virtio_scsi_scan_base *scan_ctx = svdev->scan_ctx;
	void *io[32];
	uint32_t io_len[32];
	uint16_t i, cnt;
	int rc;

	cnt = virtio_recv_pkts(ch->vq, (void **)io, io_len, SPDK_COUNTOF(io));
	for (i = 0; i < cnt; ++i) {
		if (spdk_unlikely(scan_ctx && io[i] == &scan_ctx->io_ctx)) {
			if (svdev->removed) {
				_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
				return -1;
			}

			if (scan_ctx->restart) {
				scan_ctx->restart = false;
				scan_ctx->full_scan = true;
				_virtio_scsi_dev_scan_tgt(scan_ctx, 0);
				continue;
			}

			process_scan_resp(scan_ctx);
			continue;
		}

		bdev_virtio_io_cpl(io[i]);
	}

	if (spdk_unlikely(scan_ctx && scan_ctx->needs_resend)) {
		if (svdev->removed) {
			_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
			return -1;
		} else if (cnt == 0) {
			return 0;
		}

		rc = send_scan_io(scan_ctx);
		if (rc != 0) {
			assert(scan_ctx->retries > 0);
			scan_ctx->retries--;
			if (scan_ctx->retries == 0) {
				SPDK_ERRLOG("Target scan failed unrecoverably with rc = %d.\n", rc);
				_virtio_scsi_dev_scan_finish(scan_ctx, rc);
			}
		}
	}

	return cnt;
}

static void
bdev_virtio_tmf_cpl_cb(void *ctx)
{
	struct spdk_bdev_io *bdev_io = ctx;
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;

	if (io_ctx->tmf_resp.response == VIRTIO_SCSI_S_OK) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static void
bdev_virtio_tmf_cpl(struct spdk_bdev_io *bdev_io)
{
	spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io), bdev_virtio_tmf_cpl_cb, bdev_io);
}
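
/**
 * Handle a completed eventq buffer. Hot-plug events trigger a rescan of the
 * affected target; hot-remove events unregister the corresponding bdev. The
 * buffer is then resubmitted to the eventq.
 */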
static void
bdev_virtio_eventq_io_cpl(struct virtio_scsi_dev *svdev, struct virtio_scsi_eventq_io *io)
{
	struct virtio_scsi_event *ev = &io->ev;
	struct virtio_scsi_disk *disk;

	if (ev->lun[0] != 1) {
		SPDK_WARNLOG("Received an event with invalid data layout.\n");
		goto out;
	}

	if (ev->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
		ev->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
		virtio_scsi_dev_scan(svdev, NULL, NULL);
	}

	switch (ev->event) {
	case VIRTIO_SCSI_T_NO_EVENT:
		break;
	case VIRTIO_SCSI_T_TRANSPORT_RESET:
		switch (ev->reason) {
		case VIRTIO_SCSI_EVT_RESET_RESCAN:
			virtio_scsi_dev_scan_tgt(svdev, ev->lun[1]);
			break;
		case VIRTIO_SCSI_EVT_RESET_REMOVED:
			disk = virtio_scsi_dev_get_disk_by_id(svdev, ev->lun[1]);
			if (disk != NULL) {
				spdk_bdev_unregister(&disk->bdev, NULL, NULL);
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

out:
	virtio_scsi_dev_send_eventq_io(svdev->vdev.vqs[VIRTIO_SCSI_EVENTQ], io);
}

static void
bdev_virtio_tmf_abort_nomem_cb(void *ctx)
{
	struct spdk_bdev_io *bdev_io = ctx;

	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
}

static void
bdev_virtio_tmf_abort_ioerr_cb(void *ctx)
{
	struct spdk_bdev_io *bdev_io = ctx;

	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
}

static void
bdev_virtio_tmf_abort(struct spdk_bdev_io *bdev_io, int status)
{
	spdk_msg_fn fn;

	if (status == -ENOMEM) {
		fn = bdev_virtio_tmf_abort_nomem_cb;
	} else {
		fn = bdev_virtio_tmf_abort_ioerr_cb;
	}

	spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io), fn, bdev_io);
}

static int
bdev_virtio_send_tmf_io(struct virtqueue *ctrlq, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
	int rc;

	rc = virtqueue_req_start(ctrlq, bdev_io, 2);
	if (rc != 0) {
		return rc;
	}

	virtqueue_req_add_iovs(ctrlq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	virtqueue_req_add_iovs(ctrlq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);

	virtqueue_req_flush(ctrlq);
	return 0;
}

static int
bdev_virtio_mgmt_poll(void *arg)
{
	struct virtio_scsi_dev *svdev = arg;
	struct virtio_dev *vdev = &svdev->vdev;
	struct virtqueue *eventq = vdev->vqs[VIRTIO_SCSI_EVENTQ];
	struct virtqueue *ctrlq = vdev->vqs[VIRTIO_SCSI_CONTROLQ];
	struct spdk_ring *send_ring = svdev->ctrlq_ring;
	void *io[16];
	uint32_t io_len[16];
	uint16_t i, cnt;
	int rc;
	int total = 0;

	cnt = spdk_ring_dequeue(send_ring, io, SPDK_COUNTOF(io));
	total += cnt;
	for (i = 0; i < cnt; ++i) {
		rc = bdev_virtio_send_tmf_io(ctrlq, io[i]);
		if (rc != 0) {
			bdev_virtio_tmf_abort(io[i], rc);
		}
	}

	cnt = virtio_recv_pkts(ctrlq, io, io_len, SPDK_COUNTOF(io));
	total += cnt;
	for (i = 0; i < cnt; ++i) {
		bdev_virtio_tmf_cpl(io[i]);
	}

	cnt = virtio_recv_pkts(eventq, io, io_len, SPDK_COUNTOF(io));
	total += cnt;
	for (i = 0; i < cnt; ++i) {
		bdev_virtio_eventq_io_cpl(svdev, io[i]);
	}

	return total;
}
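
/**
 * I/O channel constructor. Each channel claims an unused request virtqueue
 * for its exclusive use and registers a poller that reaps completions.
 */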
static int
bdev_virtio_scsi_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct virtio_scsi_dev *svdev = io_device;
	struct virtio_dev *vdev = &svdev->vdev;
	struct bdev_virtio_io_channel *ch = ctx_buf;
	struct virtqueue *vq;
	int32_t queue_idx;

	queue_idx = virtio_dev_find_and_acquire_queue(vdev, VIRTIO_SCSI_REQUESTQ);
	if (queue_idx < 0) {
		SPDK_ERRLOG("Couldn't get an unused queue for the io_channel.\n");
		return -1;
	}

	vq = vdev->vqs[queue_idx];

	ch->svdev = svdev;
	ch->vq = vq;

	ch->poller = spdk_poller_register(bdev_virtio_poll, ch, 0);

	return 0;
}

static void
bdev_virtio_scsi_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct bdev_virtio_io_channel *ch = ctx_buf;
	struct virtio_scsi_dev *svdev = ch->svdev;
	struct virtio_dev *vdev = &svdev->vdev;
	struct virtqueue *vq = ch->vq;

	spdk_poller_unregister(&ch->poller);
	virtio_dev_release_queue(vdev, vq->vq_queue_index);
}

static void
_virtio_scsi_dev_scan_finish(struct virtio_scsi_scan_base *base, int errnum)
{
	struct virtio_scsi_dev *svdev = base->svdev;
	size_t bdevs_cnt;
	struct spdk_bdev *bdevs[BDEV_VIRTIO_MAX_TARGET];
	struct virtio_scsi_disk *disk;
	struct virtio_scsi_scan_info *tgt, *next_tgt;

	spdk_put_io_channel(spdk_io_channel_from_ctx(base->channel));
	base->svdev->scan_ctx = NULL;

	TAILQ_FOREACH_SAFE(tgt, &base->scan_queue, tailq, next_tgt) {
		TAILQ_REMOVE(&base->scan_queue, tgt, tailq);
		free(tgt);
	}

	if (base->cb_fn == NULL) {
		spdk_free(base);
		return;
	}

	bdevs_cnt = 0;
	if (errnum == 0) {
		TAILQ_FOREACH(disk, &svdev->luns, link) {
			bdevs[bdevs_cnt] = &disk->bdev;
			bdevs_cnt++;
		}
	}

	base->cb_fn(base->cb_arg, errnum, bdevs, bdevs_cnt);
	spdk_free(base);
}
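
/**
 * Submit the scan request prepared in base->io_ctx on the scan channel's
 * request queue. If the virtqueue is full, needs_resend is set so the
 * response poller retries the submission later.
 */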
static int
send_scan_io(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_io_ctx *io_ctx = &base->io_ctx;
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtqueue *vq = base->channel->vq;
	int payload_iov_cnt = base->iov.iov_len > 0 ? 1 : 0;
	int rc;

	req->lun[0] = 1;
	req->lun[1] = base->info.target;

	rc = virtqueue_req_start(vq, io_ctx, 2 + payload_iov_cnt);
	if (rc != 0) {
		base->needs_resend = true;
		return -1;
	}

	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
	virtqueue_req_add_iovs(vq, &base->iov, payload_iov_cnt, SPDK_VIRTIO_DESC_WR);

	virtqueue_req_flush(vq);
	return 0;
}

static int
send_inquiry(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct spdk_scsi_cdb_inquiry *cdb;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = BDEV_VIRTIO_SCAN_PAYLOAD_SIZE;
	cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;
	cdb->opcode = SPDK_SPC_INQUIRY;
	to_be16(cdb->alloc_len, BDEV_VIRTIO_SCAN_PAYLOAD_SIZE);

	return send_scan_io(base);
}

static int
send_inquiry_vpd(struct virtio_scsi_scan_base *base, uint8_t page_code)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct spdk_scsi_cdb_inquiry *inquiry_cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = BDEV_VIRTIO_SCAN_PAYLOAD_SIZE;
	inquiry_cdb->opcode = SPDK_SPC_INQUIRY;
	inquiry_cdb->evpd = 1;
	inquiry_cdb->page_code = page_code;
	to_be16(inquiry_cdb->alloc_len, base->iov.iov_len);

	return send_scan_io(base);
}

static int
send_read_cap_10(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = 8;
	req->cdb[0] = SPDK_SBC_READ_CAPACITY_10;

	return send_scan_io(base);
}

static int
send_read_cap_16(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = 32;
	req->cdb[0] = SPDK_SPC_SERVICE_ACTION_IN_16;
	req->cdb[1] = SPDK_SBC_SAI_READ_CAPACITY_16;
	to_be32(&req->cdb[10], base->iov.iov_len);

	return send_scan_io(base);
}

static int
send_test_unit_ready(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));
	req->cdb[0] = SPDK_SPC_TEST_UNIT_READY;
	base->iov.iov_len = 0;

	return send_scan_io(base);
}

static int
send_start_stop_unit(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));
	req->cdb[0] = SPDK_SBC_START_STOP_UNIT;
	req->cdb[4] = SPDK_SBC_START_STOP_UNIT_START_BIT;
	base->iov.iov_len = 0;

	return send_scan_io(base);
}

static int
process_scan_start_stop_unit(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;

	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		return send_inquiry_vpd(base, SPDK_SPC_VPD_SUPPORTED_VPD_PAGES);
	}

	return -1;
}

static int
process_scan_test_unit_ready(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	int sk, asc, ascq;

	get_scsi_status(resp, &sk, &asc, &ascq);

	/* check response, get VPD if spun up otherwise send SSU */
	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		return send_inquiry_vpd(base, SPDK_SPC_VPD_SUPPORTED_VPD_PAGES);
	} else if (resp->response == VIRTIO_SCSI_S_OK &&
		   resp->status == SPDK_SCSI_STATUS_CHECK_CONDITION &&
		   sk == SPDK_SCSI_SENSE_UNIT_ATTENTION &&
		   asc == SPDK_SCSI_ASC_LOGICAL_UNIT_NOT_READY) {
		return send_start_stop_unit(base);
	} else {
		return -1;
	}
}

static int
process_scan_inquiry_standard(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	struct spdk_scsi_cdb_inquiry_data *inquiry_data =
		(struct spdk_scsi_cdb_inquiry_data *)base->payload;

	if (resp->status != SPDK_SCSI_STATUS_GOOD) {
		return -1;
	}

	/* check to make sure it's a supported device */
	if (inquiry_data->peripheral_device_type != SPDK_SPC_PERIPHERAL_DEVICE_TYPE_DISK ||
	    inquiry_data->peripheral_qualifier != SPDK_SPC_PERIPHERAL_QUALIFIER_CONNECTED) {
		SPDK_WARNLOG("Unsupported peripheral device type 0x%02x (qualifier 0x%02x)\n",
			     inquiry_data->peripheral_device_type,
			     inquiry_data->peripheral_qualifier);
		return -1;
	}

	return send_test_unit_ready(base);
}
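
/**
 * Parse the Supported VPD Pages response. If the Logical Block Provisioning
 * page is advertised, query it to learn whether UNMAP is supported;
 * otherwise go straight to READ CAPACITY(10).
 */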
static int
process_scan_inquiry_vpd_supported_vpd_pages(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	bool block_provisioning_page_supported = false;

	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		const uint8_t *vpd_data = base->payload;
		const uint8_t *supported_vpd_pages = vpd_data + 4;
		uint16_t page_length;
		uint16_t num_supported_pages;
		uint16_t i;

		page_length = from_be16(vpd_data + 2);
		num_supported_pages = spdk_min(page_length, base->iov.iov_len - 4);

		for (i = 0; i < num_supported_pages; i++) {
			if (supported_vpd_pages[i] == SPDK_SPC_VPD_BLOCK_THIN_PROVISION) {
				block_provisioning_page_supported = true;
				break;
			}
		}
	}

	if (block_provisioning_page_supported) {
		return send_inquiry_vpd(base, SPDK_SPC_VPD_BLOCK_THIN_PROVISION);
	} else {
		return send_read_cap_10(base);
	}
}

static int
process_scan_inquiry_vpd_block_thin_provision(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;

	base->info.unmap_supported = false;

	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		uint8_t *vpd_data = base->payload;

		base->info.unmap_supported = !!(vpd_data[5] & SPDK_SCSI_UNMAP_LBPU);
	}

	SPDK_INFOLOG(SPDK_LOG_VIRTIO, "Target %u: unmap supported = %d\n",
		     base->info.target, (int)base->info.unmap_supported);

	return send_read_cap_10(base);
}

static int
process_scan_inquiry(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct spdk_scsi_cdb_inquiry *inquiry_cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;

	if ((inquiry_cdb->evpd & 1) == 0) {
		return process_scan_inquiry_standard(base);
	}

	switch (inquiry_cdb->page_code) {
	case SPDK_SPC_VPD_SUPPORTED_VPD_PAGES:
		return process_scan_inquiry_vpd_supported_vpd_pages(base);
	case SPDK_SPC_VPD_BLOCK_THIN_PROVISION:
		return process_scan_inquiry_vpd_block_thin_provision(base);
	default:
		SPDK_DEBUGLOG(SPDK_LOG_VIRTIO, "Unexpected VPD page 0x%02x\n", inquiry_cdb->page_code);
		return -1;
	}
}

static void
bdev_virtio_disc_notify_remove(void *remove_ctx)
{
	struct virtio_scsi_disk *disk = remove_ctx;

	disk->removed = true;
	spdk_bdev_close(disk->notify_desc);
}

/* To be called only from the thread performing target scan */
static int
virtio_scsi_dev_add_tgt(struct virtio_scsi_dev *svdev, struct virtio_scsi_scan_info *info)
{
	struct virtio_scsi_disk *disk;
	struct spdk_bdev *bdev;
	int rc;

	TAILQ_FOREACH(disk, &svdev->luns, link) {
		if (disk->info.target == info->target) {
			/* Target is already attached and param change is not supported */
			return 0;
		}
	}

	if (info->block_size == 0 || info->num_blocks == 0) {
		SPDK_ERRLOG("%s: invalid target %u: bs=%"PRIu32" blocks=%"PRIu64"\n",
			    svdev->vdev.name, info->target, info->block_size, info->num_blocks);
		return -EINVAL;
	}

	disk = calloc(1, sizeof(*disk));
	if (disk == NULL) {
		SPDK_ERRLOG("could not allocate disk\n");
		return -ENOMEM;
	}

	disk->svdev = svdev;
	memcpy(&disk->info, info, sizeof(*info));

	bdev = &disk->bdev;
	bdev->name = spdk_sprintf_alloc("%st%"PRIu8, svdev->vdev.name, info->target);
	if (bdev->name == NULL) {
		SPDK_ERRLOG("Couldn't alloc memory for the bdev name.\n");
		free(disk);
		return -ENOMEM;
	}

	bdev->product_name = "Virtio SCSI Disk";
	bdev->write_cache = 0;
	bdev->blocklen = disk->info.block_size;
	bdev->blockcnt = disk->info.num_blocks;

	bdev->ctxt = disk;
	bdev->fn_table = &virtio_fn_table;
	bdev->module = &virtio_scsi_if;

	rc = spdk_bdev_register(&disk->bdev);
	if (rc) {
		SPDK_ERRLOG("Failed to register bdev name=%s\n", disk->bdev.name);
		free(bdev->name);
		free(disk);
		return rc;
	}

	rc = spdk_bdev_open(bdev, false, bdev_virtio_disc_notify_remove, disk, &disk->notify_desc);
	if (rc) {
		assert(false);
	}

	TAILQ_INSERT_TAIL(&svdev->luns, disk, link);
	return 0;
}
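
/**
 * Parse a READ CAPACITY(10) response: the returned highest LBA and block
 * size determine the bdev geometry. A max LBA of 0xFFFFFFFF means the target
 * is too large for the 10-byte command, so READ CAPACITY(16) is issued
 * instead.
 */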
static int
process_read_cap_10(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	uint64_t max_block;
	uint32_t block_size;
	uint8_t target_id = req->lun[1];
	int rc;

	if (resp->response != VIRTIO_SCSI_S_OK || resp->status != SPDK_SCSI_STATUS_GOOD) {
		SPDK_ERRLOG("READ CAPACITY (10) failed for target %"PRIu8".\n", target_id);
		return -1;
	}

	block_size = from_be32(base->payload + 4);
	max_block = from_be32(base->payload);

	if (max_block == 0xffffffff) {
		return send_read_cap_16(base);
	}

	base->info.num_blocks = (uint64_t)max_block + 1;
	base->info.block_size = block_size;

	rc = virtio_scsi_dev_add_tgt(base->svdev, &base->info);
	if (rc != 0) {
		return rc;
	}

	return _virtio_scsi_dev_scan_next(base, 0);
}

static int
process_read_cap_16(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	uint8_t target_id = req->lun[1];
	int rc;

	if (resp->response != VIRTIO_SCSI_S_OK || resp->status != SPDK_SCSI_STATUS_GOOD) {
		SPDK_ERRLOG("READ CAPACITY (16) failed for target %"PRIu8".\n", target_id);
		return -1;
	}

	base->info.num_blocks = from_be64(base->payload) + 1;
	base->info.block_size = from_be32(base->payload + 8);
	rc = virtio_scsi_dev_add_tgt(base->svdev, &base->info);
	if (rc != 0) {
		return rc;
	}

	return _virtio_scsi_dev_scan_next(base, 0);
}
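
/**
 * Dispatch a completed scan I/O based on the CDB opcode that was sent.
 * Unreachable targets end the scan for that target, transient errors are
 * retried up to SCAN_REQUEST_RETRIES times, and unrecoverable errors move
 * the scan on to the next target.
 */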
static void
process_scan_resp(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	int rc, sk, asc, ascq;
	uint8_t target_id;

	if (base->io_ctx.iov_req.iov_len < sizeof(struct virtio_scsi_cmd_req) ||
	    base->io_ctx.iov_resp.iov_len < sizeof(struct virtio_scsi_cmd_resp)) {
		SPDK_ERRLOG("Received target scan message with invalid length.\n");
		_virtio_scsi_dev_scan_next(base, -EIO);
		return;
	}

	get_scsi_status(resp, &sk, &asc, &ascq);
	target_id = req->lun[1];

	if (resp->response == VIRTIO_SCSI_S_BAD_TARGET ||
	    resp->response == VIRTIO_SCSI_S_INCORRECT_LUN) {
		_virtio_scsi_dev_scan_next(base, -ENODEV);
		return;
	}

	if (resp->response != VIRTIO_SCSI_S_OK ||
	    (resp->status == SPDK_SCSI_STATUS_CHECK_CONDITION &&
	     sk != SPDK_SCSI_SENSE_ILLEGAL_REQUEST)) {
		assert(base->retries > 0);
		base->retries--;
		if (base->retries == 0) {
			SPDK_NOTICELOG("Target %"PRIu8" is present, but unavailable.\n", target_id);
			SPDK_LOGDUMP(SPDK_LOG_VIRTIO, "CDB", req->cdb, sizeof(req->cdb));
			SPDK_LOGDUMP(SPDK_LOG_VIRTIO, "SENSE DATA", resp->sense, sizeof(resp->sense));
			_virtio_scsi_dev_scan_next(base, -EBUSY);
			return;
		}

		/* resend the same request */
		rc = send_scan_io(base);
		if (rc != 0) {
			/* Let response poller do the resend */
		}
		return;
	}

	base->retries = SCAN_REQUEST_RETRIES;

	switch (req->cdb[0]) {
	case SPDK_SPC_INQUIRY:
		rc = process_scan_inquiry(base);
		break;
	case SPDK_SPC_TEST_UNIT_READY:
		rc = process_scan_test_unit_ready(base);
		break;
	case SPDK_SBC_START_STOP_UNIT:
		rc = process_scan_start_stop_unit(base);
		break;
	case SPDK_SBC_READ_CAPACITY_10:
		rc = process_read_cap_10(base);
		break;
	case SPDK_SPC_SERVICE_ACTION_IN_16:
		rc = process_read_cap_16(base);
		break;
	default:
		SPDK_ERRLOG("Received invalid target scan message: cdb[0] = %"PRIu8".\n", req->cdb[0]);
		rc = -1;
		break;
	}

	if (rc != 0) {
		if (base->needs_resend) {
			return; /* Let response poller do the resend */
		}

		_virtio_scsi_dev_scan_next(base, rc);
	}
}

static int
_virtio_scsi_dev_scan_next(struct virtio_scsi_scan_base *base, int rc)
{
	struct virtio_scsi_scan_info *next;
	struct virtio_scsi_disk *disk;
	uint8_t target_id;

	if (base->full_scan) {
		if (rc != 0) {
			disk = virtio_scsi_dev_get_disk_by_id(base->svdev,
							      base->info.target);
			if (disk != NULL) {
				spdk_bdev_unregister(&disk->bdev, NULL, NULL);
			}
		}

		target_id = base->info.target + 1;
		if (target_id < BDEV_VIRTIO_MAX_TARGET) {
			_virtio_scsi_dev_scan_tgt(base, target_id);
			return 0;
		}

		base->full_scan = false;
	}

	next = TAILQ_FIRST(&base->scan_queue);
	if (next == NULL) {
		_virtio_scsi_dev_scan_finish(base, 0);
		return 0;
	}

	TAILQ_REMOVE(&base->scan_queue, next, tailq);
	target_id = next->target;
	free(next);

	_virtio_scsi_dev_scan_tgt(base, target_id);
	return 0;
}

static int
virtio_pci_scsi_dev_enumerate_cb(struct virtio_pci_ctx *pci_ctx, void *ctx)
{
	struct virtio_scsi_dev *svdev;

	svdev = virtio_pci_scsi_dev_create(NULL, pci_ctx);
	return svdev == NULL ? -1 : 0;
}
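
/**
 * Parse the legacy INI configuration: every [VirtioUserN] section describes
 * a vhost-user SCSI device to attach, and the [VirtioPci] section may enable
 * enumeration of virtio-scsi PCI devices.
 */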
static int
bdev_virtio_process_config(void)
{
	struct spdk_conf_section *sp;
	struct virtio_scsi_dev *svdev;
	char *default_name = NULL;
	char *path, *type, *name;
	unsigned vdev_num;
	int num_queues;
	bool enable_pci;
	int rc = 0;

	for (sp = spdk_conf_first_section(NULL); sp != NULL; sp = spdk_conf_next_section(sp)) {
		if (!spdk_conf_section_match_prefix(sp, "VirtioUser")) {
			continue;
		}

		if (sscanf(spdk_conf_section_get_name(sp), "VirtioUser%u", &vdev_num) != 1) {
			SPDK_ERRLOG("Section '%s' has non-numeric suffix.\n",
				    spdk_conf_section_get_name(sp));
			rc = -1;
			goto out;
		}

		path = spdk_conf_section_get_val(sp, "Path");
		if (path == NULL) {
			SPDK_ERRLOG("VirtioUser%u: missing Path\n", vdev_num);
			rc = -1;
			goto out;
		}

		type = spdk_conf_section_get_val(sp, "Type");
		if (type != NULL && strcmp(type, "SCSI") != 0) {
			continue;
		}

		num_queues = spdk_conf_section_get_intval(sp, "Queues");
		if (num_queues < 1) {
			num_queues = 1;
		} else if (num_queues > SPDK_VIRTIO_MAX_VIRTQUEUES) {
			num_queues = SPDK_VIRTIO_MAX_VIRTQUEUES;
		}

		name = spdk_conf_section_get_val(sp, "Name");
		if (name == NULL) {
			default_name = spdk_sprintf_alloc("VirtioScsi%u", vdev_num);
			name = default_name;
		}

		svdev = virtio_user_scsi_dev_create(name, path, num_queues, 512);
		free(default_name);
		default_name = NULL;

		if (svdev == NULL) {
			rc = -1;
			goto out;
		}
	}

	sp = spdk_conf_find_section(NULL, "VirtioPci");
	if (sp == NULL) {
		return 0;
	}

	enable_pci = spdk_conf_section_get_boolval(sp, "Enable", false);
	if (enable_pci) {
		rc = virtio_pci_dev_enumerate(virtio_pci_scsi_dev_enumerate_cb, NULL,
					      PCI_DEVICE_ID_VIRTIO_SCSI_MODERN);
	}

out:
	return rc;
}

static int
_virtio_scsi_dev_scan_init(struct virtio_scsi_dev *svdev)
{
	struct virtio_scsi_scan_base *base;
	struct spdk_io_channel *io_ch;
	struct virtio_scsi_io_ctx *io_ctx;
	struct virtio_scsi_cmd_req *req;
	struct virtio_scsi_cmd_resp *resp;

	io_ch = spdk_get_io_channel(svdev);
	if (io_ch == NULL) {
		return -EBUSY;
	}

	base = spdk_zmalloc(sizeof(*base), 64, NULL,
			    SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (base == NULL) {
		SPDK_ERRLOG("couldn't allocate memory for scsi target scan.\n");
		return -ENOMEM;
	}

	base->svdev = svdev;

	base->channel = spdk_io_channel_get_ctx(io_ch);
	TAILQ_INIT(&base->scan_queue);
	svdev->scan_ctx = base;

	base->iov.iov_base = base->payload;
	io_ctx = &base->io_ctx;
	req = &io_ctx->req;
	resp = &io_ctx->resp;
	io_ctx->iov_req.iov_base = req;
	io_ctx->iov_req.iov_len = sizeof(*req);
	io_ctx->iov_resp.iov_base = resp;
	io_ctx->iov_resp.iov_len = sizeof(*resp);

	base->retries = SCAN_REQUEST_RETRIES;
	return 0;
}

static void
_virtio_scsi_dev_scan_tgt(struct virtio_scsi_scan_base *base, uint8_t target)
{
	int rc;

	memset(&base->info, 0, sizeof(base->info));
	base->info.target = target;

	rc = send_inquiry(base);
	if (rc) {
		/* Let response poller do the resend */
	}
}
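
/**
 * Start a full scan of all targets on the device. If a scan is already in
 * progress, the pending per-target rescans are dropped and the running scan
 * is restarted from target 0 once its current I/O completes.
 */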
static int
virtio_scsi_dev_scan(struct virtio_scsi_dev *svdev, bdev_virtio_create_cb cb_fn,
		     void *cb_arg)
{
	struct virtio_scsi_scan_base *base;
	struct virtio_scsi_scan_info *tgt, *next_tgt;
	int rc;

	if (svdev->scan_ctx) {
		if (svdev->scan_ctx->full_scan) {
			return -EEXIST;
		}

		/* We're about to start a full rescan, so there's no need
		 * to scan particular targets afterwards.
		 */
		TAILQ_FOREACH_SAFE(tgt, &svdev->scan_ctx->scan_queue, tailq, next_tgt) {
			TAILQ_REMOVE(&svdev->scan_ctx->scan_queue, tgt, tailq);
			free(tgt);
		}

		svdev->scan_ctx->cb_fn = cb_fn;
		svdev->scan_ctx->cb_arg = cb_arg;
		svdev->scan_ctx->restart = true;
		return 0;
	}

	rc = _virtio_scsi_dev_scan_init(svdev);
	if (rc != 0) {
		return rc;
	}

	base = svdev->scan_ctx;
	base->cb_fn = cb_fn;
	base->cb_arg = cb_arg;
	base->full_scan = true;

	_virtio_scsi_dev_scan_tgt(base, 0);
	return 0;
}

static int
virtio_scsi_dev_scan_tgt(struct virtio_scsi_dev *svdev, uint8_t target)
{
	struct virtio_scsi_scan_base *base;
	struct virtio_scsi_scan_info *info;
	int rc;

	base = svdev->scan_ctx;
	if (base) {
		info = calloc(1, sizeof(*info));
		if (info == NULL) {
			SPDK_ERRLOG("calloc failed\n");
			return -ENOMEM;
		}

		info->target = target;
		TAILQ_INSERT_TAIL(&base->scan_queue, info, tailq);
		return 0;
	}

	rc = _virtio_scsi_dev_scan_init(svdev);
	if (rc != 0) {
		return rc;
	}

	base = svdev->scan_ctx;
	base->full_scan = true;
	_virtio_scsi_dev_scan_tgt(base, target);
	return 0;
}

static void
bdev_virtio_initial_scan_complete(void *ctx, int result,
				  struct spdk_bdev **bdevs, size_t bdevs_cnt)
{
	struct virtio_scsi_dev *svdev;

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		if (svdev->scan_ctx) {
			/* another device is still being scanned */
			pthread_mutex_unlock(&g_virtio_scsi_mutex);
			return;
		}
	}

	pthread_mutex_unlock(&g_virtio_scsi_mutex);
	spdk_bdev_module_init_done(&virtio_scsi_if);
}

static int
bdev_virtio_initialize(void)
{
	struct virtio_scsi_dev *svdev, *next_svdev;
	int rc;

	rc = bdev_virtio_process_config();
	pthread_mutex_lock(&g_virtio_scsi_mutex);

	if (rc != 0) {
		goto err_unlock;
	}

	if (TAILQ_EMPTY(&g_virtio_scsi_devs)) {
		goto out_unlock;
	}

	/* Initialize all created devices and scan available targets */
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		rc = virtio_scsi_dev_scan(svdev, bdev_virtio_initial_scan_complete, NULL);
		if (rc != 0) {
			goto err_unlock;
		}
	}

	pthread_mutex_unlock(&g_virtio_scsi_mutex);
	return 0;

err_unlock:
	/* Remove any created devices */
	TAILQ_FOREACH_SAFE(svdev, &g_virtio_scsi_devs, tailq, next_svdev) {
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}

out_unlock:
	pthread_mutex_unlock(&g_virtio_scsi_mutex);
	spdk_bdev_module_init_done(&virtio_scsi_if);
	return rc;
}
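
/**
 * Final teardown, executed on the thread that owns the controlq. Releases
 * the management queues, stops and destructs the virtio device, and invokes
 * the user's remove callback. Finishes the module if this was the last
 * device during shutdown.
 */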
static void
_virtio_scsi_dev_unregister_cb(void *io_device)
{
	struct virtio_scsi_dev *svdev = io_device;
	struct virtio_dev *vdev = &svdev->vdev;
	bool finish_module;
	bdev_virtio_remove_cb remove_cb;
	void *remove_ctx;

	assert(spdk_ring_count(svdev->ctrlq_ring) == 0);
	spdk_ring_free(svdev->ctrlq_ring);
	spdk_poller_unregister(&svdev->mgmt_poller);

	virtio_dev_release_queue(vdev, VIRTIO_SCSI_EVENTQ);
	virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);

	virtio_dev_stop(vdev);
	virtio_dev_destruct(vdev);

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_REMOVE(&g_virtio_scsi_devs, svdev, tailq);
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	remove_cb = svdev->remove_cb;
	remove_ctx = svdev->remove_ctx;
	spdk_free(svdev->eventq_ios);
	free(svdev);

	if (remove_cb) {
		remove_cb(remove_ctx, 0);
	}

	finish_module = TAILQ_EMPTY(&g_virtio_scsi_devs);

	if (g_bdev_virtio_finish && finish_module) {
		spdk_bdev_module_finish_done();
	}
}

static void
virtio_scsi_dev_unregister_cb(void *io_device)
{
	struct virtio_scsi_dev *svdev = io_device;
	struct spdk_thread *thread;

	thread = virtio_dev_queue_get_thread(&svdev->vdev, VIRTIO_SCSI_CONTROLQ);
	spdk_thread_send_msg(thread, _virtio_scsi_dev_unregister_cb, io_device);
}

static void
virtio_scsi_dev_remove(struct virtio_scsi_dev *svdev,
		       bdev_virtio_remove_cb cb_fn, void *cb_arg)
{
	struct virtio_scsi_disk *disk, *disk_tmp;
	bool do_remove = true;

	if (svdev->removed) {
		if (cb_fn) {
			cb_fn(cb_arg, -EBUSY);
		}
		return;
	}

	svdev->remove_cb = cb_fn;
	svdev->remove_ctx = cb_arg;
	svdev->removed = true;

	if (svdev->scan_ctx) {
		/* The removal will continue after we receive a pending scan I/O. */
		return;
	}

	TAILQ_FOREACH_SAFE(disk, &svdev->luns, link, disk_tmp) {
		if (!disk->removed) {
			spdk_bdev_unregister(&disk->bdev, NULL, NULL);
		}
		do_remove = false;
	}

	if (do_remove) {
		spdk_io_device_unregister(svdev, virtio_scsi_dev_unregister_cb);
	}
}
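
/**
 * Module shutdown hook. Removal of each controller is asynchronous, so
 * spdk_bdev_module_finish_done() is called either here (when no devices
 * exist) or from the last device's unregister callback.
 */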
static void
bdev_virtio_finish(void)
{
	struct virtio_scsi_dev *svdev, *next;

	g_bdev_virtio_finish = true;

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	if (TAILQ_EMPTY(&g_virtio_scsi_devs)) {
		pthread_mutex_unlock(&g_virtio_scsi_mutex);
		spdk_bdev_module_finish_done();
		return;
	}

	/* Defer module finish until all controllers are removed. */
	TAILQ_FOREACH_SAFE(svdev, &g_virtio_scsi_devs, tailq, next) {
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}
	pthread_mutex_unlock(&g_virtio_scsi_mutex);
}

int
bdev_virtio_user_scsi_dev_create(const char *base_name, const char *path,
				 unsigned num_queues, unsigned queue_size,
				 bdev_virtio_create_cb cb_fn, void *cb_arg)
{
	struct virtio_scsi_dev *svdev;
	int rc;

	svdev = virtio_user_scsi_dev_create(base_name, path, num_queues, queue_size);
	if (svdev == NULL) {
		return -1;
	}

	rc = virtio_scsi_dev_scan(svdev, cb_fn, cb_arg);
	if (rc) {
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}

	return rc;
}

struct bdev_virtio_pci_dev_create_ctx {
	const char *name;
	bdev_virtio_create_cb cb_fn;
	void *cb_arg;
};

static int
bdev_virtio_pci_scsi_dev_create_cb(struct virtio_pci_ctx *pci_ctx, void *ctx)
{
	struct virtio_scsi_dev *svdev;
	struct bdev_virtio_pci_dev_create_ctx *create_ctx = ctx;
	int rc;

	svdev = virtio_pci_scsi_dev_create(create_ctx->name, pci_ctx);
	if (svdev == NULL) {
		return -1;
	}

	rc = virtio_scsi_dev_scan(svdev, create_ctx->cb_fn, create_ctx->cb_arg);
	if (rc) {
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}

	return rc;
}

int
bdev_virtio_pci_scsi_dev_create(const char *name, struct spdk_pci_addr *pci_addr,
				bdev_virtio_create_cb cb_fn, void *cb_arg)
{
	struct bdev_virtio_pci_dev_create_ctx create_ctx;

	create_ctx.name = name;
	create_ctx.cb_fn = cb_fn;
	create_ctx.cb_arg = cb_arg;

	return virtio_pci_dev_attach(bdev_virtio_pci_scsi_dev_create_cb, &create_ctx,
				     PCI_DEVICE_ID_VIRTIO_SCSI_MODERN, pci_addr);
}

int
bdev_virtio_scsi_dev_remove(const char *name, bdev_virtio_remove_cb cb_fn, void *cb_arg)
{
	struct virtio_scsi_dev *svdev;

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		if (strcmp(svdev->vdev.name, name) == 0) {
			break;
		}
	}

	if (svdev == NULL) {
		pthread_mutex_unlock(&g_virtio_scsi_mutex);
		SPDK_ERRLOG("Cannot find Virtio-SCSI device named '%s'\n", name);
		return -ENODEV;
	}

	virtio_scsi_dev_remove(svdev, cb_fn, cb_arg);
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	return 0;
}

void
bdev_virtio_scsi_dev_list(struct spdk_json_write_ctx *w)
{
	struct virtio_scsi_dev *svdev;

	spdk_json_write_array_begin(w);

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		spdk_json_write_object_begin(w);

		spdk_json_write_named_string(w, "name", svdev->vdev.name);

		virtio_dev_dump_json_info(&svdev->vdev, w);

		spdk_json_write_object_end(w);
	}
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	spdk_json_write_array_end(w);
}

SPDK_LOG_REGISTER_COMPONENT("virtio", SPDK_LOG_VIRTIO)