/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/conf.h"
#include "spdk/endian.h"
#include "spdk/env.h"
#include "spdk/thread.h"
#include "spdk/scsi_spec.h"
#include "spdk/string.h"
#include "spdk/util.h"
#include "spdk/json.h"

#include "spdk/bdev_module.h"
#include "spdk_internal/log.h"
#include "spdk_internal/virtio.h"
#include "spdk_internal/vhost_user.h"

#include <linux/virtio_scsi.h>

#include "bdev_virtio.h"

#define BDEV_VIRTIO_MAX_TARGET		64
#define BDEV_VIRTIO_SCAN_PAYLOAD_SIZE	256
#define MGMT_POLL_PERIOD_US		(1000 * 5)
#define CTRLQ_RING_SIZE			16
#define SCAN_REQUEST_RETRIES		5

/* Number of non-request queues - eventq and controlq */
#define SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED 2

#define VIRTIO_SCSI_EVENTQ_BUFFER_COUNT	16

#define VIRTIO_SCSI_CONTROLQ	0
#define VIRTIO_SCSI_EVENTQ	1
#define VIRTIO_SCSI_REQUESTQ	2

static int bdev_virtio_initialize(void);
static void bdev_virtio_finish(void);

struct virtio_scsi_dev {
	/* Generic virtio device data. */
	struct virtio_dev vdev;

	/** Detected SCSI LUNs */
	TAILQ_HEAD(, virtio_scsi_disk) luns;

	/** Context for the SCSI target scan. */
	struct virtio_scsi_scan_base *scan_ctx;

	/** Controlq poller. */
	struct spdk_poller *mgmt_poller;

	/** Controlq messages to be sent. */
	struct spdk_ring *ctrlq_ring;

	/** Buffers for the eventq. */
	struct virtio_scsi_eventq_io *eventq_ios;

	/** Device marked for removal. */
	bool removed;

	/** Callback to be called after vdev removal. */
	bdev_virtio_remove_cb remove_cb;

	/** Context for the `remove_cb`. */
	void *remove_ctx;

	TAILQ_ENTRY(virtio_scsi_dev) tailq;
};

struct virtio_scsi_io_ctx {
	struct iovec iov_req;
	struct iovec iov_resp;
	union {
		struct virtio_scsi_cmd_req req;
		struct virtio_scsi_ctrl_tmf_req tmf_req;
	};
	union {
		struct virtio_scsi_cmd_resp resp;
		struct virtio_scsi_ctrl_tmf_resp tmf_resp;
	};
};

struct virtio_scsi_eventq_io {
	struct iovec iov;
	struct virtio_scsi_event ev;
};

struct virtio_scsi_scan_info {
	uint64_t num_blocks;
	uint32_t block_size;
	uint8_t target;
	bool unmap_supported;
	TAILQ_ENTRY(virtio_scsi_scan_info) tailq;
};

struct virtio_scsi_scan_base {
	struct virtio_scsi_dev *svdev;

	/** I/O channel used for the scan I/O. */
	struct bdev_virtio_io_channel *channel;

	bdev_virtio_create_cb cb_fn;
	void *cb_arg;

	/** Scan all targets on the device. */
	bool full_scan;

	/** Start a full rescan after receiving next scan I/O response. */
	bool restart;

	/** Additional targets to be (re)scanned. */
	TAILQ_HEAD(, virtio_scsi_scan_info) scan_queue;

	/** Remaining attempts for sending the current request. */
	unsigned retries;

	/** If set, the last scan I/O needs to be resent */
	bool needs_resend;

	struct virtio_scsi_io_ctx io_ctx;
	struct iovec iov;
	uint8_t payload[BDEV_VIRTIO_SCAN_PAYLOAD_SIZE];

	/** Scan results for the current target. */
	struct virtio_scsi_scan_info info;
};

struct virtio_scsi_disk {
	struct spdk_bdev bdev;
	struct virtio_scsi_dev *svdev;
	struct virtio_scsi_scan_info info;

	/** Descriptor opened just to be notified of external bdev hotremove. */
	struct spdk_bdev_desc *notify_desc;

	/** Disk marked for removal. */
	bool removed;
	TAILQ_ENTRY(virtio_scsi_disk) link;
};

struct bdev_virtio_io_channel {
	struct virtio_scsi_dev *svdev;

	/** Virtqueue exclusively assigned to this channel. */
	struct virtqueue *vq;

	/** Virtio response poller. */
	struct spdk_poller *poller;
};

static TAILQ_HEAD(, virtio_scsi_dev) g_virtio_scsi_devs =
	TAILQ_HEAD_INITIALIZER(g_virtio_scsi_devs);

static pthread_mutex_t g_virtio_scsi_mutex = PTHREAD_MUTEX_INITIALIZER;

/** Module finish in progress */
static bool g_bdev_virtio_finish = false;

/* Features desired/implemented by this driver. */
#define VIRTIO_SCSI_DEV_SUPPORTED_FEATURES		\
	(1ULL << VIRTIO_SCSI_F_INOUT		|	\
	 1ULL << VIRTIO_SCSI_F_HOTPLUG		|	\
	 1ULL << VIRTIO_RING_F_EVENT_IDX	|	\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

static void virtio_scsi_dev_unregister_cb(void *io_device);
static void virtio_scsi_dev_remove(struct virtio_scsi_dev *svdev,
				   bdev_virtio_remove_cb cb_fn, void *cb_arg);
static int bdev_virtio_scsi_ch_create_cb(void *io_device, void *ctx_buf);
static void bdev_virtio_scsi_ch_destroy_cb(void *io_device, void *ctx_buf);
static void process_scan_resp(struct virtio_scsi_scan_base *base);
static int bdev_virtio_mgmt_poll(void *arg);

static int
virtio_scsi_dev_send_eventq_io(struct virtqueue *vq, struct virtio_scsi_eventq_io *io)
{
	int rc;

	rc = virtqueue_req_start(vq, io, 1);
	if (rc != 0) {
		return -1;
	}

	virtqueue_req_add_iovs(vq, &io->iov, 1, SPDK_VIRTIO_DESC_WR);
	virtqueue_req_flush(vq);

	return 0;
}

static int
virtio_scsi_dev_init(struct virtio_scsi_dev *svdev, uint16_t max_queues)
{
	struct virtio_dev *vdev = &svdev->vdev;
	struct spdk_ring *ctrlq_ring;
	struct virtio_scsi_eventq_io *eventq_io;
	struct virtqueue *eventq;
	uint16_t i, num_events;
	int rc;

	rc = virtio_dev_reset(vdev, VIRTIO_SCSI_DEV_SUPPORTED_FEATURES);
	if (rc != 0) {
		return rc;
	}

	rc = virtio_dev_start(vdev, max_queues, SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED);
	if (rc != 0) {
		return rc;
	}

	ctrlq_ring = spdk_ring_create(SPDK_RING_TYPE_MP_SC, CTRLQ_RING_SIZE,
				      SPDK_ENV_SOCKET_ID_ANY);
	if (ctrlq_ring == NULL) {
		SPDK_ERRLOG("Failed to allocate send ring for the controlq.\n");
		return -1;
	}

	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_CONTROLQ);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to acquire the controlq.\n");
		spdk_ring_free(ctrlq_ring);
		return -1;
	}

	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_EVENTQ);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to acquire the eventq.\n");
		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
		spdk_ring_free(ctrlq_ring);
		return -1;
	}

	eventq = vdev->vqs[VIRTIO_SCSI_EVENTQ];
	num_events = spdk_min(eventq->vq_nentries, VIRTIO_SCSI_EVENTQ_BUFFER_COUNT);
	svdev->eventq_ios = spdk_zmalloc(sizeof(*svdev->eventq_ios) * num_events,
					 0, NULL, SPDK_ENV_LCORE_ID_ANY,
					 SPDK_MALLOC_DMA);
	if (svdev->eventq_ios == NULL) {
		SPDK_ERRLOG("cannot allocate memory for %"PRIu16" eventq buffers\n",
			    num_events);
		virtio_dev_release_queue(vdev, VIRTIO_SCSI_EVENTQ);
		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
		spdk_ring_free(ctrlq_ring);
		return -1;
	}

	for (i = 0; i < num_events; i++) {
		eventq_io = &svdev->eventq_ios[i];
		eventq_io->iov.iov_base = &eventq_io->ev;
		eventq_io->iov.iov_len = sizeof(eventq_io->ev);
		virtio_scsi_dev_send_eventq_io(eventq, eventq_io);
	}

	svdev->ctrlq_ring = ctrlq_ring;

	svdev->mgmt_poller = SPDK_POLLER_REGISTER(bdev_virtio_mgmt_poll, svdev,
			     MGMT_POLL_PERIOD_US);

	TAILQ_INIT(&svdev->luns);
	svdev->scan_ctx = NULL;
	svdev->removed = false;
	svdev->remove_cb = NULL;
	svdev->remove_ctx = NULL;

	spdk_io_device_register(svdev, bdev_virtio_scsi_ch_create_cb,
				bdev_virtio_scsi_ch_destroy_cb,
				sizeof(struct bdev_virtio_io_channel),
				svdev->vdev.name);

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_INSERT_TAIL(&g_virtio_scsi_devs, svdev, tailq);
	pthread_mutex_unlock(&g_virtio_scsi_mutex);
	return 0;
}

static struct virtio_scsi_dev *
virtio_pci_scsi_dev_create(const char *name, struct virtio_pci_ctx *pci_ctx)
{
	static int pci_dev_counter = 0;
	struct virtio_scsi_dev *svdev;
	struct virtio_dev *vdev;
	char *default_name = NULL;
	uint32_t num_queues;
	int rc;

	svdev = calloc(1, sizeof(*svdev));
	if (svdev == NULL) {
		SPDK_ERRLOG("virtio device calloc failed\n");
		return NULL;
	}

	vdev = &svdev->vdev;
	if (name == NULL) {
		default_name = spdk_sprintf_alloc("VirtioScsi%"PRIu32, pci_dev_counter++);
		if (default_name == NULL) {
			free(vdev);
			return NULL;
		}
		name = default_name;
	}

	rc = virtio_pci_dev_init(vdev, name, pci_ctx);
	free(default_name);

	if (rc != 0) {
		free(svdev);
		return NULL;
	}

	rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_scsi_config, num_queues),
					&num_queues, sizeof(num_queues));
	if (rc) {
		SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc));
		goto fail;
	}

	rc = virtio_scsi_dev_init(svdev, num_queues);
	if (rc != 0) {
		goto fail;
	}

	return svdev;

fail:
	vdev->ctx = NULL;
	virtio_dev_destruct(vdev);
	free(svdev);
	return NULL;
}

static struct virtio_scsi_dev *
virtio_user_scsi_dev_create(const char *name, const char *path,
			    uint16_t num_queues, uint32_t queue_size)
{
	struct virtio_scsi_dev *svdev;
	struct virtio_dev *vdev;
	int rc;

	svdev = calloc(1, sizeof(*svdev));
	if (svdev == NULL) {
		SPDK_ERRLOG("calloc failed for virtio device %s: %s\n", name, path);
		return NULL;
	}

	vdev = &svdev->vdev;
	rc = virtio_user_dev_init(vdev, name, path, queue_size);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create virtio device %s: %s\n", name, path);
		free(svdev);
		return NULL;
	}

	rc = virtio_scsi_dev_init(svdev, num_queues);
	if (rc != 0) {
		virtio_dev_destruct(vdev);
		free(svdev);
		return NULL;
	}

	return svdev;
}

static struct virtio_scsi_disk *
virtio_scsi_dev_get_disk_by_id(struct virtio_scsi_dev *svdev, uint8_t target_id)
{
	struct virtio_scsi_disk *disk;

	TAILQ_FOREACH(disk, &svdev->luns, link) {
		if (disk->info.target == target_id) {
			return disk;
		}
	}

	return NULL;
}

static int virtio_scsi_dev_scan(struct virtio_scsi_dev *svdev,
				bdev_virtio_create_cb cb_fn, void *cb_arg);
static int send_scan_io(struct virtio_scsi_scan_base *base);
static void _virtio_scsi_dev_scan_tgt(struct virtio_scsi_scan_base *base, uint8_t target);
static int _virtio_scsi_dev_scan_next(struct virtio_scsi_scan_base *base, int rc);
static void _virtio_scsi_dev_scan_finish(struct virtio_scsi_scan_base *base, int errnum);
static int virtio_scsi_dev_scan_tgt(struct virtio_scsi_dev *svdev, uint8_t target);

static int
bdev_virtio_get_ctx_size(void)
{
	return sizeof(struct virtio_scsi_io_ctx);
}

static int
bdev_virtio_scsi_config_json(struct spdk_json_write_ctx *w)
{
	struct virtio_scsi_dev *svdev;

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		spdk_json_write_object_begin(w);

		spdk_json_write_named_string(w, "method", "bdev_virtio_attach_controller");

		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "name", svdev->vdev.name);
		spdk_json_write_named_string(w, "dev_type", "scsi");

		/* Write transport specific parameters. */
		svdev->vdev.backend_ops->write_json_config(&svdev->vdev, w);

		spdk_json_write_object_end(w);

		spdk_json_write_object_end(w);

	}
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	return 0;
}


static struct spdk_bdev_module virtio_scsi_if = {
	.name = "virtio_scsi",
	.module_init = bdev_virtio_initialize,
	.module_fini = bdev_virtio_finish,
	.get_ctx_size = bdev_virtio_get_ctx_size,
	.config_json = bdev_virtio_scsi_config_json,
	.async_init = true,
	.async_fini = true,
};

SPDK_BDEV_MODULE_REGISTER(virtio_scsi, &virtio_scsi_if)

static struct virtio_scsi_io_ctx *
bdev_virtio_init_io_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_cmd_req *req;
	struct virtio_scsi_cmd_resp *resp;
	struct virtio_scsi_disk *disk = (struct virtio_scsi_disk *)bdev_io->bdev;
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;

	req = &io_ctx->req;
	resp = &io_ctx->resp;

	io_ctx->iov_req.iov_base = req;
	io_ctx->iov_req.iov_len = sizeof(*req);

	io_ctx->iov_resp.iov_base = resp;
	io_ctx->iov_resp.iov_len = sizeof(*resp);

	memset(req, 0, sizeof(*req));
	req->lun[0] = 1;
	req->lun[1] = disk->info.target;

	return io_ctx;
}

static struct virtio_scsi_io_ctx *
bdev_virtio_init_tmf_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;

	tmf_req = &io_ctx->tmf_req;
	tmf_resp = &io_ctx->tmf_resp;

	io_ctx->iov_req.iov_base = tmf_req;
	io_ctx->iov_req.iov_len = sizeof(*tmf_req);
	io_ctx->iov_resp.iov_base = tmf_resp;
	io_ctx->iov_resp.iov_len = sizeof(*tmf_resp);

	memset(tmf_req, 0, sizeof(*tmf_req));
	tmf_req->lun[0] = 1;
	tmf_req->lun[1] = disk->info.target;

	return io_ctx;
}

static void
bdev_virtio_send_io(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_virtio_io_channel *virtio_channel = spdk_io_channel_get_ctx(ch);
	struct virtqueue *vq = virtio_channel->vq;
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
	int rc;

	rc = virtqueue_req_start(vq, bdev_io, bdev_io->u.bdev.iovcnt + 2);
	if (rc == -ENOMEM) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
		return;
	} else if (rc != 0) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				       SPDK_VIRTIO_DESC_WR);
	} else {
		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				       SPDK_VIRTIO_DESC_RO);
		virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
	}

	virtqueue_req_flush(vq);
}

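/*
 * Descriptive note on the descriptor chain built by bdev_virtio_send_io()
 * above (a rough sketch of what the code does, not an additional code path):
 * the virtio_scsi_cmd_req header always goes first as a device-readable
 * element and the virtio_scsi_cmd_resp buffer is device-writable. For reads,
 * the data iovecs follow the response and are device-writable; for writes,
 * they precede the response and are device-readable. Roughly:
 *
 *	READ:  [req hdr, RO] -> [resp, WR] -> [data iov 0..n-1, WR]
 *	WRITE: [req hdr, RO] -> [data iov 0..n-1, RO] -> [resp, WR]
 */
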
static void
bdev_virtio_rw(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_io_vreq(ch, bdev_io);
	struct virtio_scsi_cmd_req *req = &io_ctx->req;
	bool is_write = bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE;

	if (disk->info.num_blocks > (1ULL << 32)) {
		req->cdb[0] = is_write ? SPDK_SBC_WRITE_16 : SPDK_SBC_READ_16;
		to_be64(&req->cdb[2], bdev_io->u.bdev.offset_blocks);
		to_be32(&req->cdb[10], bdev_io->u.bdev.num_blocks);
	} else {
		req->cdb[0] = is_write ? SPDK_SBC_WRITE_10 : SPDK_SBC_READ_10;
		to_be32(&req->cdb[2], bdev_io->u.bdev.offset_blocks);
		to_be16(&req->cdb[7], bdev_io->u.bdev.num_blocks);
	}

	bdev_virtio_send_io(ch, bdev_io);
}

static void
bdev_virtio_reset(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_virtio_io_channel *virtio_ch = spdk_io_channel_get_ctx(ch);
	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_tmf_vreq(ch, bdev_io);
	struct virtio_scsi_ctrl_tmf_req *tmf_req = &io_ctx->tmf_req;
	struct virtio_scsi_dev *svdev = virtio_ch->svdev;
	size_t enqueued_count;

	tmf_req->type = VIRTIO_SCSI_T_TMF;
	tmf_req->subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;

	enqueued_count = spdk_ring_enqueue(svdev->ctrlq_ring, (void **)&bdev_io, 1, NULL);
	if (spdk_likely(enqueued_count == 1)) {
		return;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

static void
bdev_virtio_unmap(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
{
	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_io_vreq(ch, bdev_io);
	struct virtio_scsi_cmd_req *req = &io_ctx->req;
	struct spdk_scsi_unmap_bdesc *desc, *first_desc;
	uint8_t *buf;
	uint64_t offset_blocks, num_blocks;
	uint16_t cmd_len;

	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	buf = bdev_io->u.bdev.iovs[0].iov_base;

	offset_blocks = bdev_io->u.bdev.offset_blocks;
	num_blocks = bdev_io->u.bdev.num_blocks;

	/* (n-1) * 16-byte descriptors */
	first_desc = desc = (struct spdk_scsi_unmap_bdesc *)&buf[8];
	while (num_blocks > UINT32_MAX) {
		to_be64(&desc->lba, offset_blocks);
		to_be32(&desc->block_count, UINT32_MAX);
		memset(&desc->reserved, 0, sizeof(desc->reserved));
		offset_blocks += UINT32_MAX;
		num_blocks -= UINT32_MAX;
		desc++;
	}

	/* The last descriptor with block_count <= UINT32_MAX */
	to_be64(&desc->lba, offset_blocks);
	to_be32(&desc->block_count, num_blocks);
	memset(&desc->reserved, 0, sizeof(desc->reserved));

	/* 8-byte header + n * 16-byte block descriptor */
	cmd_len = 8 + (desc - first_desc + 1) * sizeof(struct spdk_scsi_unmap_bdesc);

	req->cdb[0] = SPDK_SBC_UNMAP;
	to_be16(&req->cdb[7], cmd_len);

	/* 8-byte header */
	to_be16(&buf[0], cmd_len - 2); /* total length (excluding the length field) */
	to_be16(&buf[2], cmd_len - 8); /* length of block descriptors */
	memset(&buf[4], 0, 4); /* reserved */

	bdev_virtio_send_io(ch, bdev_io);
}

static void
bdev_virtio_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		       bool success)
{
	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	bdev_virtio_rw(ch, bdev_io);
}

static int
_bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		return 0;
	case SPDK_BDEV_IO_TYPE_WRITE:
		bdev_virtio_rw(ch, bdev_io);
		return 0;
	case SPDK_BDEV_IO_TYPE_RESET:
		bdev_virtio_reset(ch, bdev_io);
		return 0;
	case SPDK_BDEV_IO_TYPE_UNMAP: {
		uint64_t buf_len = 8 /* header size */ +
				   (bdev_io->u.bdev.num_blocks + UINT32_MAX - 1) /
				   UINT32_MAX * sizeof(struct spdk_scsi_unmap_bdesc);

		if (!disk->info.unmap_supported) {
			return -1;
		}

		if (buf_len > SPDK_BDEV_LARGE_BUF_MAX_SIZE) {
			SPDK_ERRLOG("Trying to UNMAP too many blocks: %"PRIu64"\n",
				    bdev_io->u.bdev.num_blocks);
			return -1;
		}
		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_unmap, buf_len);
		return 0;
	}
	case SPDK_BDEV_IO_TYPE_FLUSH:
	default:
		return -1;
	}
	return 0;
}

static void
bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	if (_bdev_virtio_submit_request(ch, bdev_io) < 0) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static bool
bdev_virtio_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct virtio_scsi_disk *disk = ctx;

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_WRITE:
	case SPDK_BDEV_IO_TYPE_FLUSH:
	case SPDK_BDEV_IO_TYPE_RESET:
		return true;

	case SPDK_BDEV_IO_TYPE_UNMAP:
		return disk->info.unmap_supported;

	default:
		return false;
	}
}

static struct spdk_io_channel *
bdev_virtio_get_io_channel(void *ctx)
{
	struct virtio_scsi_disk *disk = ctx;

	return spdk_get_io_channel(disk->svdev);
}

static int
bdev_virtio_disk_destruct(void *ctx)
{
	struct virtio_scsi_disk *disk = ctx;
	struct virtio_scsi_dev *svdev = disk->svdev;

	TAILQ_REMOVE(&svdev->luns, disk, link);
	free(disk->bdev.name);
	free(disk);

	if (svdev->removed && TAILQ_EMPTY(&svdev->luns)) {
		spdk_io_device_unregister(svdev, virtio_scsi_dev_unregister_cb);
	}

	return 0;
}

static int
bdev_virtio_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
	struct virtio_scsi_disk *disk = ctx;

	virtio_dev_dump_json_info(&disk->svdev->vdev, w);
	return 0;
}

static void
bdev_virtio_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	/* SCSI targets and LUNs are discovered during the scan process, so there
	 * is nothing to save here.
	 */
}

static const struct spdk_bdev_fn_table virtio_fn_table = {
	.destruct		= bdev_virtio_disk_destruct,
	.submit_request		= bdev_virtio_submit_request,
	.io_type_supported	= bdev_virtio_io_type_supported,
	.get_io_channel		= bdev_virtio_get_io_channel,
	.dump_info_json		= bdev_virtio_dump_info_json,
	.write_config_json	= bdev_virtio_write_config_json,
};

static void
get_scsi_status(struct virtio_scsi_cmd_resp *resp, int *sk, int *asc, int *ascq)
{
	/* see spdk_scsi_task_build_sense_data() for sense data details */
	*sk = 0;
	*asc = 0;
	*ascq = 0;

	if (resp->sense_len < 3) {
		return;
	}

	*sk = resp->sense[2] & 0xf;

	if (resp->sense_len < 13) {
		return;
	}

	*asc = resp->sense[12];

	if (resp->sense_len < 14) {
		return;
	}

	*ascq = resp->sense[13];
}

static void
bdev_virtio_io_cpl(struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
	int sk, asc, ascq;

	get_scsi_status(&io_ctx->resp, &sk, &asc, &ascq);
	spdk_bdev_io_complete_scsi_status(bdev_io, io_ctx->resp.status, sk, asc, ascq);
}

static int
bdev_virtio_poll(void *arg)
{
	struct bdev_virtio_io_channel *ch = arg;
	struct virtio_scsi_dev *svdev = ch->svdev;
	struct virtio_scsi_scan_base *scan_ctx = svdev->scan_ctx;
	void *io[32];
	uint32_t io_len[32];
	uint16_t i, cnt;
	int rc;

	cnt = virtio_recv_pkts(ch->vq, (void **)io, io_len, SPDK_COUNTOF(io));
	for (i = 0; i < cnt; ++i) {
		if (spdk_unlikely(scan_ctx && io[i] == &scan_ctx->io_ctx)) {
			if (svdev->removed) {
				_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
				return SPDK_POLLER_BUSY;
			}

			if (scan_ctx->restart) {
				scan_ctx->restart = false;
				scan_ctx->full_scan = true;
				_virtio_scsi_dev_scan_tgt(scan_ctx, 0);
				continue;
			}

			process_scan_resp(scan_ctx);
			continue;
		}

		bdev_virtio_io_cpl(io[i]);
	}

	if (spdk_unlikely(scan_ctx && scan_ctx->needs_resend)) {
		if (svdev->removed) {
			_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
			return SPDK_POLLER_BUSY;
		} else if (cnt == 0) {
			return SPDK_POLLER_IDLE;
		}

		rc = send_scan_io(scan_ctx);
		if (rc != 0) {
			assert(scan_ctx->retries > 0);
			scan_ctx->retries--;
			if (scan_ctx->retries == 0) {
				SPDK_ERRLOG("Target scan failed unrecoverably with rc = %d.\n", rc);
				_virtio_scsi_dev_scan_finish(scan_ctx, rc);
			}
		}
	}

	return cnt;
}

static void
bdev_virtio_tmf_cpl_cb(void *ctx)
{
	struct spdk_bdev_io *bdev_io = ctx;
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;

	if (io_ctx->tmf_resp.response == VIRTIO_SCSI_S_OK) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static void
bdev_virtio_tmf_cpl(struct spdk_bdev_io *bdev_io)
{
	spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io), bdev_virtio_tmf_cpl_cb, bdev_io);
}

static void
bdev_virtio_eventq_io_cpl(struct virtio_scsi_dev *svdev, struct virtio_scsi_eventq_io *io)
{
	struct virtio_scsi_event *ev = &io->ev;
	struct virtio_scsi_disk *disk;

	if (ev->lun[0] != 1) {
		SPDK_WARNLOG("Received an event with invalid data layout.\n");
		goto out;
	}

	if (ev->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
		ev->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
		virtio_scsi_dev_scan(svdev, NULL, NULL);
	}

	switch (ev->event) {
	case VIRTIO_SCSI_T_NO_EVENT:
		break;
	case VIRTIO_SCSI_T_TRANSPORT_RESET:
		switch (ev->reason) {
		case VIRTIO_SCSI_EVT_RESET_RESCAN:
			virtio_scsi_dev_scan_tgt(svdev, ev->lun[1]);
			break;
		case VIRTIO_SCSI_EVT_RESET_REMOVED:
			disk = virtio_scsi_dev_get_disk_by_id(svdev, ev->lun[1]);
			if (disk != NULL) {
				spdk_bdev_unregister(&disk->bdev, NULL, NULL);
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

out:
	virtio_scsi_dev_send_eventq_io(svdev->vdev.vqs[VIRTIO_SCSI_EVENTQ], io);
}

static void
bdev_virtio_tmf_abort_nomem_cb(void *ctx)
{
	struct spdk_bdev_io *bdev_io = ctx;

	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
}

static void
bdev_virtio_tmf_abort_ioerr_cb(void *ctx)
{
	struct spdk_bdev_io *bdev_io = ctx;

	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
}

static void
bdev_virtio_tmf_abort(struct spdk_bdev_io *bdev_io, int status)
{
	spdk_msg_fn fn;

	if (status == -ENOMEM) {
		fn = bdev_virtio_tmf_abort_nomem_cb;
	} else {
		fn = bdev_virtio_tmf_abort_ioerr_cb;
	}

	spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io), fn, bdev_io);
}

static int
bdev_virtio_send_tmf_io(struct virtqueue *ctrlq, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
	int rc;

	rc = virtqueue_req_start(ctrlq, bdev_io, 2);
	if (rc != 0) {
		return rc;
	}

	virtqueue_req_add_iovs(ctrlq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	virtqueue_req_add_iovs(ctrlq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);

	virtqueue_req_flush(ctrlq);
	return 0;
}

static int
bdev_virtio_mgmt_poll(void *arg)
{
	struct virtio_scsi_dev *svdev = arg;
	struct virtio_dev *vdev = &svdev->vdev;
	struct virtqueue *eventq = vdev->vqs[VIRTIO_SCSI_EVENTQ];
	struct virtqueue *ctrlq = vdev->vqs[VIRTIO_SCSI_CONTROLQ];
	struct spdk_ring *send_ring = svdev->ctrlq_ring;
	void *io[16];
	uint32_t io_len[16];
	uint16_t i, cnt;
	int rc;
	int total = 0;

	cnt = spdk_ring_dequeue(send_ring, io, SPDK_COUNTOF(io));
	total += cnt;
	for (i = 0; i < cnt; ++i) {
		rc = bdev_virtio_send_tmf_io(ctrlq, io[i]);
		if (rc != 0) {
			bdev_virtio_tmf_abort(io[i], rc);
		}
	}

	cnt = virtio_recv_pkts(ctrlq, io, io_len, SPDK_COUNTOF(io));
	total += cnt;
	for (i = 0; i < cnt; ++i) {
		bdev_virtio_tmf_cpl(io[i]);
	}

	cnt = virtio_recv_pkts(eventq, io, io_len, SPDK_COUNTOF(io));
	total += cnt;
	for (i = 0; i < cnt; ++i) {
		bdev_virtio_eventq_io_cpl(svdev, io[i]);
	}

	return total;
}

static int
bdev_virtio_scsi_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct virtio_scsi_dev *svdev = io_device;
	struct virtio_dev *vdev = &svdev->vdev;
	struct bdev_virtio_io_channel *ch = ctx_buf;
	struct virtqueue *vq;
	int32_t queue_idx;

	queue_idx = virtio_dev_find_and_acquire_queue(vdev, VIRTIO_SCSI_REQUESTQ);
	if (queue_idx < 0) {
		SPDK_ERRLOG("Couldn't get an unused queue for the io_channel.\n");
		return -1;
	}

	vq = vdev->vqs[queue_idx];

	ch->svdev = svdev;
	ch->vq = vq;

	ch->poller = SPDK_POLLER_REGISTER(bdev_virtio_poll, ch, 0);

	return 0;
}

static void
bdev_virtio_scsi_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct bdev_virtio_io_channel *ch = ctx_buf;
	struct virtio_scsi_dev *svdev = ch->svdev;
	struct virtio_dev *vdev = &svdev->vdev;
	struct virtqueue *vq = ch->vq;

	spdk_poller_unregister(&ch->poller);
	virtio_dev_release_queue(vdev, vq->vq_queue_index);
}

static void
_virtio_scsi_dev_scan_finish(struct virtio_scsi_scan_base *base, int errnum)
{
	struct virtio_scsi_dev *svdev = base->svdev;
	size_t bdevs_cnt;
	struct spdk_bdev *bdevs[BDEV_VIRTIO_MAX_TARGET];
	struct virtio_scsi_disk *disk;
	struct virtio_scsi_scan_info *tgt, *next_tgt;

	spdk_put_io_channel(spdk_io_channel_from_ctx(base->channel));
	base->svdev->scan_ctx = NULL;

	TAILQ_FOREACH_SAFE(tgt, &base->scan_queue, tailq, next_tgt) {
		TAILQ_REMOVE(&base->scan_queue, tgt, tailq);
		free(tgt);
	}

	if (base->cb_fn == NULL) {
		spdk_free(base);
		return;
	}

	bdevs_cnt = 0;
	if (errnum == 0) {
		TAILQ_FOREACH(disk, &svdev->luns, link) {
			bdevs[bdevs_cnt] = &disk->bdev;
			bdevs_cnt++;
		}
	}

	base->cb_fn(base->cb_arg, errnum, bdevs, bdevs_cnt);
	spdk_free(base);
}

static int
send_scan_io(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_io_ctx *io_ctx = &base->io_ctx;
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtqueue *vq = base->channel->vq;
	int payload_iov_cnt = base->iov.iov_len > 0 ? 1 : 0;
	int rc;

	req->lun[0] = 1;
	req->lun[1] = base->info.target;

	rc = virtqueue_req_start(vq, io_ctx, 2 + payload_iov_cnt);
	if (rc != 0) {
		base->needs_resend = true;
		return -1;
	}

	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
	virtqueue_req_add_iovs(vq, &base->iov, payload_iov_cnt, SPDK_VIRTIO_DESC_WR);

	virtqueue_req_flush(vq);
	return 0;
}

static int
send_inquiry(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct spdk_scsi_cdb_inquiry *cdb;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = BDEV_VIRTIO_SCAN_PAYLOAD_SIZE;
	cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;
	cdb->opcode = SPDK_SPC_INQUIRY;
	to_be16(cdb->alloc_len, BDEV_VIRTIO_SCAN_PAYLOAD_SIZE);

	return send_scan_io(base);
}

static int
send_inquiry_vpd(struct virtio_scsi_scan_base *base, uint8_t page_code)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct spdk_scsi_cdb_inquiry *inquiry_cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = BDEV_VIRTIO_SCAN_PAYLOAD_SIZE;
	inquiry_cdb->opcode = SPDK_SPC_INQUIRY;
	inquiry_cdb->evpd = 1;
	inquiry_cdb->page_code = page_code;
	to_be16(inquiry_cdb->alloc_len, base->iov.iov_len);

	return send_scan_io(base);
}

static int
send_read_cap_10(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = 8;
	req->cdb[0] = SPDK_SBC_READ_CAPACITY_10;

	return send_scan_io(base);
}

static int
send_read_cap_16(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = 32;
	req->cdb[0] = SPDK_SPC_SERVICE_ACTION_IN_16;
	req->cdb[1] = SPDK_SBC_SAI_READ_CAPACITY_16;
	to_be32(&req->cdb[10], base->iov.iov_len);

	return send_scan_io(base);
}

static int
send_test_unit_ready(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));
	req->cdb[0] = SPDK_SPC_TEST_UNIT_READY;
	base->iov.iov_len = 0;

	return send_scan_io(base);
}

static int
send_start_stop_unit(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));
	req->cdb[0] = SPDK_SBC_START_STOP_UNIT;
	req->cdb[4] = SPDK_SBC_START_STOP_UNIT_START_BIT;
	base->iov.iov_len = 0;

	return send_scan_io(base);
}

static int
process_scan_start_stop_unit(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;

	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		return send_inquiry_vpd(base, SPDK_SPC_VPD_SUPPORTED_VPD_PAGES);
	}

	return -1;
}

static int
process_scan_test_unit_ready(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	int sk, asc, ascq;

	get_scsi_status(resp, &sk, &asc, &ascq);

	/* Check the response: fetch the VPD pages if the unit is spun up,
	 * otherwise send START STOP UNIT. */
	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		return send_inquiry_vpd(base, SPDK_SPC_VPD_SUPPORTED_VPD_PAGES);
	} else if (resp->response == VIRTIO_SCSI_S_OK &&
		   resp->status == SPDK_SCSI_STATUS_CHECK_CONDITION &&
		   sk == SPDK_SCSI_SENSE_UNIT_ATTENTION &&
		   asc == SPDK_SCSI_ASC_LOGICAL_UNIT_NOT_READY) {
		return send_start_stop_unit(base);
	} else {
		return -1;
	}
}

static int
process_scan_inquiry_standard(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	struct spdk_scsi_cdb_inquiry_data *inquiry_data =
		(struct spdk_scsi_cdb_inquiry_data *)base->payload;

	if (resp->status != SPDK_SCSI_STATUS_GOOD) {
		return -1;
	}

	/* Check to make sure it's a supported device. */
	if (inquiry_data->peripheral_device_type != SPDK_SPC_PERIPHERAL_DEVICE_TYPE_DISK ||
	    inquiry_data->peripheral_qualifier != SPDK_SPC_PERIPHERAL_QUALIFIER_CONNECTED) {
		SPDK_WARNLOG("Unsupported peripheral device type 0x%02x (qualifier 0x%02x)\n",
			     inquiry_data->peripheral_device_type,
			     inquiry_data->peripheral_qualifier);
		return -1;
	}

	return send_test_unit_ready(base);
}

static int
process_scan_inquiry_vpd_supported_vpd_pages(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	bool block_provisioning_page_supported = false;

	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		const uint8_t *vpd_data = base->payload;
		const uint8_t *supported_vpd_pages = vpd_data + 4;
		uint16_t page_length;
		uint16_t num_supported_pages;
		uint16_t i;

		page_length = from_be16(vpd_data + 2);
		num_supported_pages = spdk_min(page_length, base->iov.iov_len - 4);

		for (i = 0; i < num_supported_pages; i++) {
			if (supported_vpd_pages[i] == SPDK_SPC_VPD_BLOCK_THIN_PROVISION) {
				block_provisioning_page_supported = true;
				break;
			}
		}
	}

	if (block_provisioning_page_supported) {
		return send_inquiry_vpd(base, SPDK_SPC_VPD_BLOCK_THIN_PROVISION);
	} else {
		return send_read_cap_10(base);
	}
}

static int
process_scan_inquiry_vpd_block_thin_provision(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;

	base->info.unmap_supported = false;

	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		uint8_t *vpd_data = base->payload;

		base->info.unmap_supported = !!(vpd_data[5] & SPDK_SCSI_UNMAP_LBPU);
	}

	SPDK_INFOLOG(SPDK_LOG_VIRTIO, "Target %u: unmap supported = %d\n",
		     base->info.target, (int)base->info.unmap_supported);

	return send_read_cap_10(base);
}

static int
process_scan_inquiry(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct spdk_scsi_cdb_inquiry *inquiry_cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;

	if ((inquiry_cdb->evpd & 1) == 0) {
		return process_scan_inquiry_standard(base);
	}

	switch (inquiry_cdb->page_code) {
	case SPDK_SPC_VPD_SUPPORTED_VPD_PAGES:
		return process_scan_inquiry_vpd_supported_vpd_pages(base);
	case SPDK_SPC_VPD_BLOCK_THIN_PROVISION:
		return process_scan_inquiry_vpd_block_thin_provision(base);
	default:
		SPDK_DEBUGLOG(SPDK_LOG_VIRTIO, "Unexpected VPD page 0x%02x\n", inquiry_cdb->page_code);
		return -1;
	}
}

static void
bdev_virtio_disc_notify_remove(void *remove_ctx)
{
	struct virtio_scsi_disk *disk = remove_ctx;

	disk->removed = true;
	spdk_bdev_close(disk->notify_desc);
}

/* To be called only from the thread performing target scan */
static int
virtio_scsi_dev_add_tgt(struct virtio_scsi_dev *svdev, struct virtio_scsi_scan_info *info)
{
	struct virtio_scsi_disk *disk;
	struct spdk_bdev *bdev;
	int rc;

	TAILQ_FOREACH(disk, &svdev->luns, link) {
		if (disk->info.target == info->target) {
			/* Target is already attached and param change is not supported */
			return 0;
		}
	}

	if (info->block_size == 0 || info->num_blocks == 0) {
		SPDK_ERRLOG("%s: invalid target %u: bs=%"PRIu32" blocks=%"PRIu64"\n",
			    svdev->vdev.name, info->target, info->block_size, info->num_blocks);
		return -EINVAL;
	}

	disk = calloc(1, sizeof(*disk));
	if (disk == NULL) {
		SPDK_ERRLOG("could not allocate disk\n");
		return -ENOMEM;
	}

	disk->svdev = svdev;
	memcpy(&disk->info, info, sizeof(*info));

	bdev = &disk->bdev;
	bdev->name = spdk_sprintf_alloc("%st%"PRIu8, svdev->vdev.name, info->target);
	if (bdev->name == NULL) {
		SPDK_ERRLOG("Couldn't alloc memory for the bdev name.\n");
		free(disk);
		return -ENOMEM;
	}

	bdev->product_name = "Virtio SCSI Disk";
	bdev->write_cache = 0;
	bdev->blocklen = disk->info.block_size;
	bdev->blockcnt = disk->info.num_blocks;

	bdev->ctxt = disk;
	bdev->fn_table = &virtio_fn_table;
	bdev->module = &virtio_scsi_if;

	rc = spdk_bdev_register(&disk->bdev);
	if (rc) {
		SPDK_ERRLOG("Failed to register bdev name=%s\n", disk->bdev.name);
		free(bdev->name);
		free(disk);
		return rc;
	}

	rc = spdk_bdev_open(bdev, false, bdev_virtio_disc_notify_remove, disk, &disk->notify_desc);
	if (rc) {
		assert(false);
	}

	TAILQ_INSERT_TAIL(&svdev->luns, disk, link);
	return 0;
}

static int
process_read_cap_10(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	uint64_t max_block;
	uint32_t block_size;
	uint8_t target_id = req->lun[1];
	int rc;

	if (resp->response != VIRTIO_SCSI_S_OK || resp->status != SPDK_SCSI_STATUS_GOOD) {
		SPDK_ERRLOG("READ CAPACITY (10) failed for target %"PRIu8".\n", target_id);
		return -1;
	}

	block_size = from_be32(base->payload + 4);
	max_block = from_be32(base->payload);

	if (max_block == 0xffffffff) {
		return send_read_cap_16(base);
	}

	base->info.num_blocks = (uint64_t)max_block + 1;
	base->info.block_size = block_size;

	rc = virtio_scsi_dev_add_tgt(base->svdev, &base->info);
	if (rc != 0) {
		return rc;
	}

	return _virtio_scsi_dev_scan_next(base, 0);
}

static int
process_read_cap_16(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	uint8_t target_id = req->lun[1];
	int rc;

	if (resp->response != VIRTIO_SCSI_S_OK || resp->status != SPDK_SCSI_STATUS_GOOD) {
		SPDK_ERRLOG("READ CAPACITY (16) failed for target %"PRIu8".\n", target_id);
		return -1;
	}

	base->info.num_blocks = from_be64(base->payload) + 1;
	base->info.block_size = from_be32(base->payload + 8);
	rc = virtio_scsi_dev_add_tgt(base->svdev, &base->info);
	if (rc != 0) {
		return rc;
	}

	return _virtio_scsi_dev_scan_next(base, 0);
}

static void
process_scan_resp(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	int rc, sk, asc, ascq;
	uint8_t target_id;

	if (base->io_ctx.iov_req.iov_len < sizeof(struct virtio_scsi_cmd_req) ||
	    base->io_ctx.iov_resp.iov_len < sizeof(struct virtio_scsi_cmd_resp)) {
		SPDK_ERRLOG("Received target scan message with invalid length.\n");
		_virtio_scsi_dev_scan_next(base, -EIO);
		return;
	}

	get_scsi_status(resp, &sk, &asc, &ascq);
	target_id = req->lun[1];

	if (resp->response == VIRTIO_SCSI_S_BAD_TARGET ||
	    resp->response == VIRTIO_SCSI_S_INCORRECT_LUN) {
		_virtio_scsi_dev_scan_next(base, -ENODEV);
		return;
	}

	if (resp->response != VIRTIO_SCSI_S_OK ||
	    (resp->status == SPDK_SCSI_STATUS_CHECK_CONDITION &&
	     sk != SPDK_SCSI_SENSE_ILLEGAL_REQUEST)) {
		assert(base->retries > 0);
		base->retries--;
		if (base->retries == 0) {
			SPDK_NOTICELOG("Target %"PRIu8" is present, but unavailable.\n", target_id);
			SPDK_LOGDUMP(SPDK_LOG_VIRTIO, "CDB", req->cdb, sizeof(req->cdb));
			SPDK_LOGDUMP(SPDK_LOG_VIRTIO, "SENSE DATA", resp->sense, sizeof(resp->sense));
			_virtio_scsi_dev_scan_next(base, -EBUSY);
			return;
		}

		/* resend the same request */
		rc = send_scan_io(base);
		if (rc != 0) {
			/* Let response poller do the resend */
		}
		return;
	}

	base->retries = SCAN_REQUEST_RETRIES;

	switch (req->cdb[0]) {
	case SPDK_SPC_INQUIRY:
		rc = process_scan_inquiry(base);
		break;
	case SPDK_SPC_TEST_UNIT_READY:
		rc = process_scan_test_unit_ready(base);
		break;
	case SPDK_SBC_START_STOP_UNIT:
		rc = process_scan_start_stop_unit(base);
		break;
	case SPDK_SBC_READ_CAPACITY_10:
		rc = process_read_cap_10(base);
		break;
	case SPDK_SPC_SERVICE_ACTION_IN_16:
		rc = process_read_cap_16(base);
		break;
	default:
		SPDK_ERRLOG("Received invalid target scan message: cdb[0] = %"PRIu8".\n", req->cdb[0]);
		rc = -1;
		break;
	}

	if (rc != 0) {
		if (base->needs_resend) {
			return; /* Let response poller do the resend */
		}

		_virtio_scsi_dev_scan_next(base, rc);
	}
}

static int
_virtio_scsi_dev_scan_next(struct virtio_scsi_scan_base *base, int rc)
{
	struct virtio_scsi_scan_info *next;
	struct virtio_scsi_disk *disk;
	uint8_t target_id;

	if (base->full_scan) {
		if (rc != 0) {
			disk = virtio_scsi_dev_get_disk_by_id(base->svdev,
							      base->info.target);
			if (disk != NULL) {
				spdk_bdev_unregister(&disk->bdev, NULL, NULL);
			}
		}

		target_id = base->info.target + 1;
		if (target_id < BDEV_VIRTIO_MAX_TARGET) {
			_virtio_scsi_dev_scan_tgt(base, target_id);
			return 0;
		}

		base->full_scan = false;
	}

	next = TAILQ_FIRST(&base->scan_queue);
	if (next == NULL) {
		_virtio_scsi_dev_scan_finish(base, 0);
		return 0;
	}

	TAILQ_REMOVE(&base->scan_queue, next, tailq);
	target_id = next->target;
	free(next);

	_virtio_scsi_dev_scan_tgt(base, target_id);
	return 0;
}

static int
virtio_pci_scsi_dev_enumerate_cb(struct virtio_pci_ctx *pci_ctx, void *ctx)
{
	struct virtio_scsi_dev *svdev;

	svdev = virtio_pci_scsi_dev_create(NULL, pci_ctx);
	return svdev == NULL ? -1 : 0;
}

static int
bdev_virtio_process_config(void)
{
	struct spdk_conf_section *sp;
	struct virtio_scsi_dev *svdev;
	char *default_name = NULL;
	char *path, *type, *name;
	unsigned vdev_num;
	int num_queues;
	bool enable_pci;
	int rc = 0;

	for (sp = spdk_conf_first_section(NULL); sp != NULL; sp = spdk_conf_next_section(sp)) {
		if (!spdk_conf_section_match_prefix(sp, "VirtioUser")) {
			continue;
		}

		if (sscanf(spdk_conf_section_get_name(sp), "VirtioUser%u", &vdev_num) != 1) {
			SPDK_ERRLOG("Section '%s' has non-numeric suffix.\n",
				    spdk_conf_section_get_name(sp));
			rc = -1;
			goto out;
		}

		path = spdk_conf_section_get_val(sp, "Path");
		if (path == NULL) {
			SPDK_ERRLOG("VirtioUser%u: missing Path\n", vdev_num);
			rc = -1;
			goto out;
		}

		type = spdk_conf_section_get_val(sp, "Type");
		if (type != NULL && strcmp(type, "SCSI") != 0) {
			continue;
		}

		num_queues = spdk_conf_section_get_intval(sp, "Queues");
		if (num_queues < 1) {
			num_queues = 1;
		} else if (num_queues > SPDK_VIRTIO_MAX_VIRTQUEUES) {
			num_queues = SPDK_VIRTIO_MAX_VIRTQUEUES;
		}

		name = spdk_conf_section_get_val(sp, "Name");
		if (name == NULL) {
			default_name = spdk_sprintf_alloc("VirtioScsi%u", vdev_num);
			name = default_name;
		}

		svdev = virtio_user_scsi_dev_create(name, path, num_queues, 512);
		free(default_name);
		default_name = NULL;

		if (svdev == NULL) {
			rc = -1;
			goto out;
		}
	}

	sp = spdk_conf_find_section(NULL, "VirtioPci");
	if (sp == NULL) {
		return 0;
	}

	enable_pci = spdk_conf_section_get_boolval(sp, "Enable", false);
	if (enable_pci) {
		rc = virtio_pci_dev_enumerate(virtio_pci_scsi_dev_enumerate_cb, NULL,
					      PCI_DEVICE_ID_VIRTIO_SCSI_MODERN);
	}

out:
	return rc;
}

static int
_virtio_scsi_dev_scan_init(struct virtio_scsi_dev *svdev)
{
	struct virtio_scsi_scan_base *base;
	struct spdk_io_channel *io_ch;
	struct virtio_scsi_io_ctx *io_ctx;
	struct virtio_scsi_cmd_req *req;
	struct virtio_scsi_cmd_resp *resp;

	io_ch = spdk_get_io_channel(svdev);
	if (io_ch == NULL) {
		return -EBUSY;
	}

	base = spdk_zmalloc(sizeof(*base), 64, NULL,
			    SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (base == NULL) {
		SPDK_ERRLOG("couldn't allocate memory for scsi target scan.\n");
		return -ENOMEM;
	}

	base->svdev = svdev;

	base->channel = spdk_io_channel_get_ctx(io_ch);
	TAILQ_INIT(&base->scan_queue);
	svdev->scan_ctx = base;

	base->iov.iov_base = base->payload;
	io_ctx = &base->io_ctx;
	req = &io_ctx->req;
	resp = &io_ctx->resp;
	io_ctx->iov_req.iov_base = req;
	io_ctx->iov_req.iov_len = sizeof(*req);
	io_ctx->iov_resp.iov_base = resp;
	io_ctx->iov_resp.iov_len = sizeof(*resp);

	base->retries = SCAN_REQUEST_RETRIES;
	return 0;
}

static void
_virtio_scsi_dev_scan_tgt(struct virtio_scsi_scan_base *base, uint8_t target)
{
	int rc;

	memset(&base->info, 0, sizeof(base->info));
	base->info.target = target;

	rc = send_inquiry(base);
	if (rc) {
		/* Let response poller do the resend */
	}
}

static int
virtio_scsi_dev_scan(struct virtio_scsi_dev *svdev, bdev_virtio_create_cb cb_fn,
		     void *cb_arg)
{
	struct virtio_scsi_scan_base *base;
	struct virtio_scsi_scan_info *tgt, *next_tgt;
	int rc;

	if (svdev->scan_ctx) {
		if (svdev->scan_ctx->full_scan) {
			return -EEXIST;
		}

		/* We're about to start a full rescan, so there's no need
		 * to scan particular targets afterwards.
		 */
		TAILQ_FOREACH_SAFE(tgt, &svdev->scan_ctx->scan_queue, tailq, next_tgt) {
			TAILQ_REMOVE(&svdev->scan_ctx->scan_queue, tgt, tailq);
			free(tgt);
		}

		svdev->scan_ctx->cb_fn = cb_fn;
		svdev->scan_ctx->cb_arg = cb_arg;
		svdev->scan_ctx->restart = true;
		return 0;
	}

	rc = _virtio_scsi_dev_scan_init(svdev);
	if (rc != 0) {
		return rc;
	}

	base = svdev->scan_ctx;
	base->cb_fn = cb_fn;
	base->cb_arg = cb_arg;
	base->full_scan = true;

	_virtio_scsi_dev_scan_tgt(base, 0);
	return 0;
}

static int
virtio_scsi_dev_scan_tgt(struct virtio_scsi_dev *svdev, uint8_t target)
{
	struct virtio_scsi_scan_base *base;
	struct virtio_scsi_scan_info *info;
	int rc;

	base = svdev->scan_ctx;
	if (base) {
		info = calloc(1, sizeof(*info));
		if (info == NULL) {
			SPDK_ERRLOG("calloc failed\n");
			return -ENOMEM;
		}

		info->target = target;
		TAILQ_INSERT_TAIL(&base->scan_queue, info, tailq);
		return 0;
	}

	rc = _virtio_scsi_dev_scan_init(svdev);
	if (rc != 0) {
		return rc;
	}

	base = svdev->scan_ctx;
	base->full_scan = true;
	_virtio_scsi_dev_scan_tgt(base, target);
	return 0;
}

static void
bdev_virtio_initial_scan_complete(void *ctx, int result,
				  struct spdk_bdev **bdevs, size_t bdevs_cnt)
{
	struct virtio_scsi_dev *svdev;

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		if (svdev->scan_ctx) {
			/* another device is still being scanned */
			pthread_mutex_unlock(&g_virtio_scsi_mutex);
			return;
		}
	}

	pthread_mutex_unlock(&g_virtio_scsi_mutex);
	spdk_bdev_module_init_done(&virtio_scsi_if);
}

static int
bdev_virtio_initialize(void)
{
	struct virtio_scsi_dev *svdev, *next_svdev;
	int rc;

	rc = bdev_virtio_process_config();
	pthread_mutex_lock(&g_virtio_scsi_mutex);

	if (rc != 0) {
		goto err_unlock;
	}

	if (TAILQ_EMPTY(&g_virtio_scsi_devs)) {
		goto out_unlock;
	}

	/* Initialize all created devices and scan available targets */
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		rc = virtio_scsi_dev_scan(svdev, bdev_virtio_initial_scan_complete, NULL);
		if (rc != 0) {
			goto err_unlock;
		}
	}

	pthread_mutex_unlock(&g_virtio_scsi_mutex);
	return 0;

err_unlock:
	/* Remove any created devices */
	TAILQ_FOREACH_SAFE(svdev, &g_virtio_scsi_devs, tailq, next_svdev) {
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}

out_unlock:
	pthread_mutex_unlock(&g_virtio_scsi_mutex);
	spdk_bdev_module_init_done(&virtio_scsi_if);
	return rc;
}

static void
_virtio_scsi_dev_unregister_cb(void *io_device)
{
	struct virtio_scsi_dev *svdev = io_device;
	struct virtio_dev *vdev = &svdev->vdev;
	bool finish_module;
	bdev_virtio_remove_cb remove_cb;
	void *remove_ctx;

	assert(spdk_ring_count(svdev->ctrlq_ring) == 0);
	spdk_ring_free(svdev->ctrlq_ring);
	spdk_poller_unregister(&svdev->mgmt_poller);

	virtio_dev_release_queue(vdev, VIRTIO_SCSI_EVENTQ);
	virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);

	virtio_dev_stop(vdev);
	virtio_dev_destruct(vdev);

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_REMOVE(&g_virtio_scsi_devs, svdev, tailq);
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	remove_cb = svdev->remove_cb;
	remove_ctx = svdev->remove_ctx;
	spdk_free(svdev->eventq_ios);
	free(svdev);

	if (remove_cb) {
		remove_cb(remove_ctx, 0);
	}

	finish_module = TAILQ_EMPTY(&g_virtio_scsi_devs);

	if (g_bdev_virtio_finish && finish_module) {
		spdk_bdev_module_finish_done();
	}
}

static void
virtio_scsi_dev_unregister_cb(void *io_device)
{
	struct virtio_scsi_dev *svdev = io_device;
	struct spdk_thread *thread;

	thread = virtio_dev_queue_get_thread(&svdev->vdev, VIRTIO_SCSI_CONTROLQ);
	spdk_thread_send_msg(thread, _virtio_scsi_dev_unregister_cb, io_device);
}

static void
virtio_scsi_dev_remove(struct virtio_scsi_dev *svdev,
		       bdev_virtio_remove_cb cb_fn, void *cb_arg)
{
	struct virtio_scsi_disk *disk, *disk_tmp;
	bool do_remove = true;

	if (svdev->removed) {
		if (cb_fn) {
			cb_fn(cb_arg, -EBUSY);
		}
		return;
	}

	svdev->remove_cb = cb_fn;
	svdev->remove_ctx = cb_arg;
	svdev->removed = true;

	if (svdev->scan_ctx) {
		/* The removal will continue after we receive a pending scan I/O. */
		return;
	}

	TAILQ_FOREACH_SAFE(disk, &svdev->luns, link, disk_tmp) {
		if (!disk->removed) {
			spdk_bdev_unregister(&disk->bdev, NULL, NULL);
		}
		do_remove = false;
	}

	if (do_remove) {
		spdk_io_device_unregister(svdev, virtio_scsi_dev_unregister_cb);
	}
}

static void
bdev_virtio_finish(void)
{
	struct virtio_scsi_dev *svdev, *next;

	g_bdev_virtio_finish = true;

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	if (TAILQ_EMPTY(&g_virtio_scsi_devs)) {
		pthread_mutex_unlock(&g_virtio_scsi_mutex);
		spdk_bdev_module_finish_done();
		return;
	}

	/* Defer module finish until all controllers are removed. */
	TAILQ_FOREACH_SAFE(svdev, &g_virtio_scsi_devs, tailq, next) {
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}
	pthread_mutex_unlock(&g_virtio_scsi_mutex);
}

int
bdev_virtio_user_scsi_dev_create(const char *base_name, const char *path,
				 unsigned num_queues, unsigned queue_size,
				 bdev_virtio_create_cb cb_fn, void *cb_arg)
{
	struct virtio_scsi_dev *svdev;
	int rc;

	svdev = virtio_user_scsi_dev_create(base_name, path, num_queues, queue_size);
	if (svdev == NULL) {
		return -1;
	}

	rc = virtio_scsi_dev_scan(svdev, cb_fn, cb_arg);
	if (rc) {
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}

	return rc;
}

struct bdev_virtio_pci_dev_create_ctx {
	const char *name;
	bdev_virtio_create_cb cb_fn;
	void *cb_arg;
};

static int
bdev_virtio_pci_scsi_dev_create_cb(struct virtio_pci_ctx *pci_ctx, void *ctx)
{
	struct virtio_scsi_dev *svdev;
	struct bdev_virtio_pci_dev_create_ctx *create_ctx = ctx;
	int rc;

	svdev = virtio_pci_scsi_dev_create(create_ctx->name, pci_ctx);
	if (svdev == NULL) {
		return -1;
	}

	rc = virtio_scsi_dev_scan(svdev, create_ctx->cb_fn, create_ctx->cb_arg);
	if (rc) {
		svdev->vdev.ctx = NULL;
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}

	return rc;
}

int
bdev_virtio_pci_scsi_dev_create(const char *name, struct spdk_pci_addr *pci_addr,
				bdev_virtio_create_cb cb_fn, void *cb_arg)
{
	struct bdev_virtio_pci_dev_create_ctx create_ctx;

	create_ctx.name = name;
	create_ctx.cb_fn = cb_fn;
	create_ctx.cb_arg = cb_arg;

	return virtio_pci_dev_attach(bdev_virtio_pci_scsi_dev_create_cb, &create_ctx,
				     PCI_DEVICE_ID_VIRTIO_SCSI_MODERN, pci_addr);
}

int
bdev_virtio_scsi_dev_remove(const char *name, bdev_virtio_remove_cb cb_fn, void *cb_arg)
{
	struct virtio_scsi_dev *svdev;

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		if (strcmp(svdev->vdev.name, name) == 0) {
			break;
		}
	}

	if (svdev == NULL) {
		pthread_mutex_unlock(&g_virtio_scsi_mutex);
		SPDK_ERRLOG("Cannot find Virtio-SCSI device named '%s'\n", name);
		return -ENODEV;
	}

	virtio_scsi_dev_remove(svdev, cb_fn, cb_arg);
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	return 0;
}

void
bdev_virtio_scsi_dev_list(struct spdk_json_write_ctx *w)
{
	struct virtio_scsi_dev *svdev;

	spdk_json_write_array_begin(w);

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		spdk_json_write_object_begin(w);

		spdk_json_write_named_string(w, "name", svdev->vdev.name);

		virtio_dev_dump_json_info(&svdev->vdev, w);

		spdk_json_write_object_end(w);
	}
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	spdk_json_write_array_end(w);
}

SPDK_LOG_REGISTER_COMPONENT("virtio", SPDK_LOG_VIRTIO)
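
/*
 * Illustrative usage sketch (comment only, not compiled into the driver):
 * a caller such as an RPC handler would typically attach a vhost-user SCSI
 * controller with bdev_virtio_user_scsi_dev_create() and collect the bdevs
 * created for each discovered target in the completion callback. The socket
 * path, controller name, and callback name below are placeholders.
 *
 *	static void
 *	attach_done(void *ctx, int result, struct spdk_bdev **bdevs, size_t cnt)
 *	{
 *		size_t i;
 *
 *		if (result != 0) {
 *			SPDK_ERRLOG("virtio-scsi attach failed: %d\n", result);
 *			return;
 *		}
 *		for (i = 0; i < cnt; i++) {
 *			SPDK_NOTICELOG("created bdev %s\n", spdk_bdev_get_name(bdevs[i]));
 *		}
 *	}
 *
 *	bdev_virtio_user_scsi_dev_create("VirtioScsi0", "/tmp/vhost.0", 4, 512,
 *					 attach_done, NULL);
 */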