/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/endian.h"
#include "spdk/env.h"
#include "spdk/thread.h"
#include "spdk/scsi_spec.h"
#include "spdk/string.h"
#include "spdk/util.h"
#include "spdk/json.h"

#include "spdk/bdev_module.h"
#include "spdk/log.h"
#include "spdk_internal/virtio.h"
#include "spdk_internal/vhost_user.h"

#include <linux/virtio_scsi.h>

#include "bdev_virtio.h"

#define BDEV_VIRTIO_MAX_TARGET 64
#define BDEV_VIRTIO_SCAN_PAYLOAD_SIZE 256
#define MGMT_POLL_PERIOD_US (1000 * 5)
#define CTRLQ_RING_SIZE 16
#define SCAN_REQUEST_RETRIES 5

/* Number of non-request queues - eventq and controlq */
#define SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED 2

#define VIRTIO_SCSI_EVENTQ_BUFFER_COUNT 16

#define VIRTIO_SCSI_CONTROLQ	0
#define VIRTIO_SCSI_EVENTQ	1
#define VIRTIO_SCSI_REQUESTQ	2

static int bdev_virtio_initialize(void);
static void bdev_virtio_finish(void);

struct virtio_scsi_dev {
	/* Generic virtio device data. */
	struct virtio_dev vdev;

	/** Detected SCSI LUNs */
	TAILQ_HEAD(, virtio_scsi_disk) luns;

	/** Context for the SCSI target scan. */
	struct virtio_scsi_scan_base *scan_ctx;

	/** Controlq poller. */
	struct spdk_poller *mgmt_poller;

	/** Controlq messages to be sent. */
	struct spdk_ring *ctrlq_ring;

	/** Buffers for the eventq. */
	struct virtio_scsi_eventq_io *eventq_ios;

	/** Device marked for removal. */
	bool removed;

	/** Callback to be called after vdev removal. */
	bdev_virtio_remove_cb remove_cb;

	/** Context for the `remove_cb`. */
	void *remove_ctx;

	TAILQ_ENTRY(virtio_scsi_dev) tailq;
};

struct virtio_scsi_io_ctx {
	struct iovec iov_req;
	struct iovec iov_resp;
	union {
		struct virtio_scsi_cmd_req req;
		struct virtio_scsi_ctrl_tmf_req tmf_req;
	};
	union {
		struct virtio_scsi_cmd_resp resp;
		struct virtio_scsi_ctrl_tmf_resp tmf_resp;
	};
};

struct virtio_scsi_eventq_io {
	struct iovec iov;
	struct virtio_scsi_event ev;
};

struct virtio_scsi_scan_info {
	uint64_t num_blocks;
	uint32_t block_size;
	uint8_t target;
	bool unmap_supported;
	TAILQ_ENTRY(virtio_scsi_scan_info) tailq;
};

struct virtio_scsi_scan_base {
	struct virtio_scsi_dev *svdev;

	/** I/O channel used for the scan I/O. */
	struct bdev_virtio_io_channel *channel;

	bdev_virtio_create_cb cb_fn;
	void *cb_arg;

	/** Scan all targets on the device. */
	bool full_scan;

	/** Start a full rescan after receiving next scan I/O response. */
	bool restart;

	/** Additional targets to be (re)scanned. */
	TAILQ_HEAD(, virtio_scsi_scan_info) scan_queue;

	/** Remaining attempts for sending the current request. */
	unsigned retries;

	/** If set, the last scan I/O needs to be resent */
	bool needs_resend;

	struct virtio_scsi_io_ctx io_ctx;
	struct iovec iov;
	uint8_t payload[BDEV_VIRTIO_SCAN_PAYLOAD_SIZE];

	/** Scan results for the current target. */
	struct virtio_scsi_scan_info info;
};

struct virtio_scsi_disk {
	struct spdk_bdev bdev;
	struct virtio_scsi_dev *svdev;
	struct virtio_scsi_scan_info info;

	/** Descriptor opened just to be notified of external bdev hotremove. */
	struct spdk_bdev_desc *notify_desc;

	/** Disk marked for removal. */
	bool removed;
	TAILQ_ENTRY(virtio_scsi_disk) link;
};

struct bdev_virtio_io_channel {
	struct virtio_scsi_dev *svdev;

	/** Virtqueue exclusively assigned to this channel. */
	struct virtqueue *vq;

	/** Virtio response poller. */
	struct spdk_poller *poller;
};

static TAILQ_HEAD(, virtio_scsi_dev) g_virtio_scsi_devs =
	TAILQ_HEAD_INITIALIZER(g_virtio_scsi_devs);

static pthread_mutex_t g_virtio_scsi_mutex = PTHREAD_MUTEX_INITIALIZER;

/** Module finish in progress */
static bool g_bdev_virtio_finish = false;

/* Features desired/implemented by this driver. */
#define VIRTIO_SCSI_DEV_SUPPORTED_FEATURES		\
	(1ULL << VIRTIO_SCSI_F_INOUT		|	\
	 1ULL << VIRTIO_SCSI_F_HOTPLUG		|	\
	 1ULL << VIRTIO_RING_F_EVENT_IDX	|	\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

static void virtio_scsi_dev_unregister_cb(void *io_device);
static void virtio_scsi_dev_remove(struct virtio_scsi_dev *svdev,
				   bdev_virtio_remove_cb cb_fn, void *cb_arg);
static int bdev_virtio_scsi_ch_create_cb(void *io_device, void *ctx_buf);
static void bdev_virtio_scsi_ch_destroy_cb(void *io_device, void *ctx_buf);
static void process_scan_resp(struct virtio_scsi_scan_base *base);
static int bdev_virtio_mgmt_poll(void *arg);

static int
virtio_scsi_dev_send_eventq_io(struct virtqueue *vq, struct virtio_scsi_eventq_io *io)
{
	int rc;

	rc = virtqueue_req_start(vq, io, 1);
	if (rc != 0) {
		return -1;
	}

	virtqueue_req_add_iovs(vq, &io->iov, 1, SPDK_VIRTIO_DESC_WR);
	virtqueue_req_flush(vq);

	return 0;
}

static int
virtio_scsi_dev_init(struct virtio_scsi_dev *svdev, uint16_t max_queues)
{
	struct virtio_dev *vdev = &svdev->vdev;
	struct spdk_ring *ctrlq_ring;
	struct virtio_scsi_eventq_io *eventq_io;
	struct virtqueue *eventq;
	uint16_t i, num_events;
	int rc;

	rc = virtio_dev_reset(vdev, VIRTIO_SCSI_DEV_SUPPORTED_FEATURES);
	if (rc != 0) {
		return rc;
	}

	rc = virtio_dev_start(vdev, max_queues, SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED);
	if (rc != 0) {
		return rc;
	}

	ctrlq_ring = spdk_ring_create(SPDK_RING_TYPE_MP_SC, CTRLQ_RING_SIZE,
				      SPDK_ENV_SOCKET_ID_ANY);
	if (ctrlq_ring == NULL) {
		SPDK_ERRLOG("Failed to allocate send ring for the controlq.\n");
		return -1;
	}

	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_CONTROLQ);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to acquire the controlq.\n");
		spdk_ring_free(ctrlq_ring);
		return -1;
	}

	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_EVENTQ);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to acquire the eventq.\n");
		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
		spdk_ring_free(ctrlq_ring);
		return -1;
	}

	eventq = vdev->vqs[VIRTIO_SCSI_EVENTQ];
	num_events = spdk_min(eventq->vq_nentries, VIRTIO_SCSI_EVENTQ_BUFFER_COUNT);
	svdev->eventq_ios = spdk_zmalloc(sizeof(*svdev->eventq_ios) * num_events,
					 0, NULL, SPDK_ENV_LCORE_ID_ANY,
					 SPDK_MALLOC_DMA);
	if (svdev->eventq_ios == NULL) {
		SPDK_ERRLOG("cannot allocate memory for %"PRIu16" eventq buffers\n",
			    num_events);
		virtio_dev_release_queue(vdev, VIRTIO_SCSI_EVENTQ);
		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
		spdk_ring_free(ctrlq_ring);
		return -1;
	}

	for (i = 0; i < num_events; i++) {
		eventq_io = &svdev->eventq_ios[i];
		eventq_io->iov.iov_base = &eventq_io->ev;
		eventq_io->iov.iov_len = sizeof(eventq_io->ev);
		virtio_scsi_dev_send_eventq_io(eventq, eventq_io);
	}

	svdev->ctrlq_ring = ctrlq_ring;

	svdev->mgmt_poller = SPDK_POLLER_REGISTER(bdev_virtio_mgmt_poll, svdev,
			     MGMT_POLL_PERIOD_US);

	TAILQ_INIT(&svdev->luns);
	svdev->scan_ctx = NULL;
	svdev->removed = false;
	svdev->remove_cb = NULL;
	svdev->remove_ctx = NULL;

	spdk_io_device_register(svdev, bdev_virtio_scsi_ch_create_cb,
				bdev_virtio_scsi_ch_destroy_cb,
				sizeof(struct bdev_virtio_io_channel),
				svdev->vdev.name);

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_INSERT_TAIL(&g_virtio_scsi_devs, svdev, tailq);
	pthread_mutex_unlock(&g_virtio_scsi_mutex);
	return 0;
}

static struct virtio_scsi_dev *
virtio_pci_scsi_dev_create(const char *name, struct virtio_pci_ctx *pci_ctx)
{
	static int pci_dev_counter = 0;
	struct virtio_scsi_dev *svdev;
	struct virtio_dev *vdev;
	char *default_name = NULL;
	uint32_t num_queues;
	int rc;

	svdev = calloc(1, sizeof(*svdev));
	if (svdev == NULL) {
		SPDK_ERRLOG("virtio device calloc failed\n");
		return NULL;
	}

	vdev = &svdev->vdev;
	if (name == NULL) {
		default_name = spdk_sprintf_alloc("VirtioScsi%"PRIu32, pci_dev_counter++);
		if (default_name == NULL) {
			free(svdev);
			return NULL;
		}
		name = default_name;
	}

	rc = virtio_pci_dev_init(vdev, name, pci_ctx);
	free(default_name);

	if (rc != 0) {
		free(svdev);
		return NULL;
	}

	rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_scsi_config, num_queues),
					&num_queues, sizeof(num_queues));
	if (rc) {
		SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc));
		goto fail;
	}

	rc = virtio_scsi_dev_init(svdev, num_queues);
	if (rc != 0) {
		goto fail;
	}

	return svdev;

fail:
	vdev->ctx = NULL;
	virtio_dev_destruct(vdev);
	free(svdev);
	return NULL;
}

static struct virtio_scsi_dev *
virtio_user_scsi_dev_create(const char *name, const char *path,
			    uint16_t num_queues, uint32_t queue_size)
{
	struct virtio_scsi_dev *svdev;
	struct virtio_dev *vdev;
	int rc;

	svdev = calloc(1, sizeof(*svdev));
	if (svdev == NULL) {
		SPDK_ERRLOG("calloc failed for virtio device %s: %s\n", name, path);
		return NULL;
	}

	vdev = &svdev->vdev;
	rc = virtio_user_dev_init(vdev, name, path, queue_size);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create virtio device %s: %s\n", name, path);
		free(svdev);
		return NULL;
	}

	rc = virtio_scsi_dev_init(svdev, num_queues);
	if (rc != 0) {
		virtio_dev_destruct(vdev);
		free(svdev);
		return NULL;
	}

	return svdev;
}

static struct virtio_scsi_disk *
virtio_scsi_dev_get_disk_by_id(struct virtio_scsi_dev *svdev, uint8_t target_id)
{
	struct virtio_scsi_disk *disk;

	TAILQ_FOREACH(disk, &svdev->luns, link) {
		if (disk->info.target == target_id) {
			return disk;
		}
	}

	return NULL;
}

static int virtio_scsi_dev_scan(struct virtio_scsi_dev *svdev,
				bdev_virtio_create_cb cb_fn, void *cb_arg);
static int send_scan_io(struct virtio_scsi_scan_base *base);
static void _virtio_scsi_dev_scan_tgt(struct virtio_scsi_scan_base *base, uint8_t target);
static int _virtio_scsi_dev_scan_next(struct virtio_scsi_scan_base *base, int rc);
static void _virtio_scsi_dev_scan_finish(struct virtio_scsi_scan_base *base, int errnum);
static int virtio_scsi_dev_scan_tgt(struct virtio_scsi_dev *svdev, uint8_t target);

static int
bdev_virtio_get_ctx_size(void)
{
	return sizeof(struct virtio_scsi_io_ctx);
}

static int
bdev_virtio_scsi_config_json(struct spdk_json_write_ctx *w)
{
	struct virtio_scsi_dev *svdev;

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		spdk_json_write_object_begin(w);

		spdk_json_write_named_string(w, "method", "bdev_virtio_attach_controller");

		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "name", svdev->vdev.name);
		spdk_json_write_named_string(w, "dev_type", "scsi");

		/* Write transport specific parameters. */
		svdev->vdev.backend_ops->write_json_config(&svdev->vdev, w);

		spdk_json_write_object_end(w);

		spdk_json_write_object_end(w);

	}
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	return 0;
}

static struct spdk_bdev_module virtio_scsi_if = {
	.name = "virtio_scsi",
	.module_init = bdev_virtio_initialize,
	.module_fini = bdev_virtio_finish,
	.get_ctx_size = bdev_virtio_get_ctx_size,
	.config_json = bdev_virtio_scsi_config_json,
	.async_fini = true,
};

SPDK_BDEV_MODULE_REGISTER(virtio_scsi, &virtio_scsi_if)

static struct virtio_scsi_io_ctx *
bdev_virtio_init_io_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_cmd_req *req;
	struct virtio_scsi_cmd_resp *resp;
	struct virtio_scsi_disk *disk = (struct virtio_scsi_disk *)bdev_io->bdev;
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;

	req = &io_ctx->req;
	resp = &io_ctx->resp;

	io_ctx->iov_req.iov_base = req;
	io_ctx->iov_req.iov_len = sizeof(*req);

	io_ctx->iov_resp.iov_base = resp;
	io_ctx->iov_resp.iov_len = sizeof(*resp);

	memset(req, 0, sizeof(*req));
	req->lun[0] = 1;
	req->lun[1] = disk->info.target;

	return io_ctx;
}

static struct virtio_scsi_io_ctx *
bdev_virtio_init_tmf_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;

	tmf_req = &io_ctx->tmf_req;
	tmf_resp = &io_ctx->tmf_resp;

	io_ctx->iov_req.iov_base = tmf_req;
	io_ctx->iov_req.iov_len = sizeof(*tmf_req);
	io_ctx->iov_resp.iov_base = tmf_resp;
	io_ctx->iov_resp.iov_len = sizeof(*tmf_resp);

	memset(tmf_req, 0, sizeof(*tmf_req));
	tmf_req->lun[0] = 1;
	tmf_req->lun[1] = disk->info.target;

	return io_ctx;
}

static void
bdev_virtio_send_io(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_virtio_io_channel *virtio_channel = spdk_io_channel_get_ctx(ch);
	struct virtqueue *vq = virtio_channel->vq;
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
	int rc;

	rc = virtqueue_req_start(vq, bdev_io, bdev_io->u.bdev.iovcnt + 2);
	if (rc == -ENOMEM) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
		return;
	} else if (rc != 0) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				       SPDK_VIRTIO_DESC_WR);
	} else {
		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				       SPDK_VIRTIO_DESC_RO);
		virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
	}

	virtqueue_req_flush(vq);
}

static void
bdev_virtio_rw(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_io_vreq(ch, bdev_io);
	struct virtio_scsi_cmd_req *req = &io_ctx->req;
	bool is_write = bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE;

	if (disk->info.num_blocks > (1ULL << 32)) {
		req->cdb[0] = is_write ? SPDK_SBC_WRITE_16 : SPDK_SBC_READ_16;
		to_be64(&req->cdb[2], bdev_io->u.bdev.offset_blocks);
		to_be32(&req->cdb[10], bdev_io->u.bdev.num_blocks);
	} else {
		req->cdb[0] = is_write ? SPDK_SBC_WRITE_10 : SPDK_SBC_READ_10;
		to_be32(&req->cdb[2], bdev_io->u.bdev.offset_blocks);
		to_be16(&req->cdb[7], bdev_io->u.bdev.num_blocks);
	}

	bdev_virtio_send_io(ch, bdev_io);
}

static void
bdev_virtio_reset(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_virtio_io_channel *virtio_ch = spdk_io_channel_get_ctx(ch);
	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_tmf_vreq(ch, bdev_io);
	struct virtio_scsi_ctrl_tmf_req *tmf_req = &io_ctx->tmf_req;
	struct virtio_scsi_dev *svdev = virtio_ch->svdev;
	size_t enqueued_count;

	tmf_req->type = VIRTIO_SCSI_T_TMF;
	tmf_req->subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;

	enqueued_count = spdk_ring_enqueue(svdev->ctrlq_ring, (void **)&bdev_io, 1, NULL);
	if (spdk_likely(enqueued_count == 1)) {
		return;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

static void
bdev_virtio_unmap(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
{
	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_io_vreq(ch, bdev_io);
	struct virtio_scsi_cmd_req *req = &io_ctx->req;
	struct spdk_scsi_unmap_bdesc *desc, *first_desc;
	uint8_t *buf;
	uint64_t offset_blocks, num_blocks;
	uint16_t cmd_len;

	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	buf = bdev_io->u.bdev.iovs[0].iov_base;

	offset_blocks = bdev_io->u.bdev.offset_blocks;
	num_blocks = bdev_io->u.bdev.num_blocks;

	/* (n-1) * 16-byte descriptors */
	first_desc = desc = (struct spdk_scsi_unmap_bdesc *)&buf[8];
	while (num_blocks > UINT32_MAX) {
		to_be64(&desc->lba, offset_blocks);
		to_be32(&desc->block_count, UINT32_MAX);
		memset(&desc->reserved, 0, sizeof(desc->reserved));
		offset_blocks += UINT32_MAX;
		num_blocks -= UINT32_MAX;
		desc++;
	}

	/* The last descriptor with block_count <= UINT32_MAX */
	to_be64(&desc->lba, offset_blocks);
	to_be32(&desc->block_count, num_blocks);
	memset(&desc->reserved, 0, sizeof(desc->reserved));

	/* 8-byte header + n * 16-byte block descriptor */
	cmd_len = 8 + (desc - first_desc + 1) * sizeof(struct spdk_scsi_unmap_bdesc);

	req->cdb[0] = SPDK_SBC_UNMAP;
	to_be16(&req->cdb[7], cmd_len);

	/* 8-byte header */
	to_be16(&buf[0], cmd_len - 2); /* total length (excluding the length field) */
	to_be16(&buf[2], cmd_len - 8); /* length of block descriptors */
	memset(&buf[4], 0, 4); /* reserved */

	bdev_virtio_send_io(ch, bdev_io);
}

static void
bdev_virtio_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		       bool success)
{
	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	bdev_virtio_rw(ch, bdev_io);
}

static int _bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		return 0;
	case SPDK_BDEV_IO_TYPE_WRITE:
		bdev_virtio_rw(ch, bdev_io);
		return 0;
	case SPDK_BDEV_IO_TYPE_RESET:
		bdev_virtio_reset(ch, bdev_io);
		return 0;
	case SPDK_BDEV_IO_TYPE_UNMAP: {
		uint64_t buf_len = 8 /* header size */ +
				   (bdev_io->u.bdev.num_blocks + UINT32_MAX - 1) /
				   UINT32_MAX * sizeof(struct spdk_scsi_unmap_bdesc);

		if (!disk->info.unmap_supported) {
			return -1;
		}

		if (buf_len > SPDK_BDEV_LARGE_BUF_MAX_SIZE) {
			SPDK_ERRLOG("Trying to UNMAP too many blocks: %"PRIu64"\n",
				    bdev_io->u.bdev.num_blocks);
			return -1;
		}
		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_unmap, buf_len);
		return 0;
	}
	case SPDK_BDEV_IO_TYPE_FLUSH:
	default:
		return -1;
	}
	return 0;
}

static void bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	if (_bdev_virtio_submit_request(ch, bdev_io) < 0) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static bool
bdev_virtio_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct virtio_scsi_disk *disk = ctx;

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_WRITE:
	case SPDK_BDEV_IO_TYPE_FLUSH:
	case SPDK_BDEV_IO_TYPE_RESET:
		return true;

	case SPDK_BDEV_IO_TYPE_UNMAP:
		return disk->info.unmap_supported;

	default:
		return false;
	}
}

static struct spdk_io_channel *
bdev_virtio_get_io_channel(void *ctx)
{
	struct virtio_scsi_disk *disk = ctx;

	return spdk_get_io_channel(disk->svdev);
}

static int
bdev_virtio_disk_destruct(void *ctx)
{
	struct virtio_scsi_disk *disk = ctx;
	struct virtio_scsi_dev *svdev = disk->svdev;

	TAILQ_REMOVE(&svdev->luns, disk, link);
	free(disk->bdev.name);
	free(disk);

	if (svdev->removed && TAILQ_EMPTY(&svdev->luns)) {
		spdk_io_device_unregister(svdev, virtio_scsi_dev_unregister_cb);
	}

	return 0;
}

static int
bdev_virtio_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
	struct virtio_scsi_disk *disk = ctx;

	virtio_dev_dump_json_info(&disk->svdev->vdev, w);
	return 0;
}

static void
bdev_virtio_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	/* SCSI targets and LUNs are discovered during the scan process so there is nothing
	 * to save here.
	 */
}

static const struct spdk_bdev_fn_table virtio_fn_table = {
	.destruct		= bdev_virtio_disk_destruct,
	.submit_request		= bdev_virtio_submit_request,
	.io_type_supported	= bdev_virtio_io_type_supported,
	.get_io_channel		= bdev_virtio_get_io_channel,
	.dump_info_json		= bdev_virtio_dump_info_json,
	.write_config_json	= bdev_virtio_write_config_json,
};

static void
get_scsi_status(struct virtio_scsi_cmd_resp *resp, int *sk, int *asc, int *ascq)
{
	/* see spdk_scsi_task_build_sense_data() for sense data details */
	*sk = 0;
	*asc = 0;
	*ascq = 0;

	if (resp->sense_len < 3) {
		return;
	}

	*sk = resp->sense[2] & 0xf;

	if (resp->sense_len < 13) {
		return;
	}

	*asc = resp->sense[12];

	if (resp->sense_len < 14) {
		return;
	}

	*ascq = resp->sense[13];
}

static void
bdev_virtio_io_cpl(struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
	int sk, asc, ascq;

	get_scsi_status(&io_ctx->resp, &sk, &asc, &ascq);
	spdk_bdev_io_complete_scsi_status(bdev_io, io_ctx->resp.status, sk, asc, ascq);
}

static int
bdev_virtio_poll(void *arg)
{
	struct bdev_virtio_io_channel *ch = arg;
	struct virtio_scsi_dev *svdev = ch->svdev;
	struct virtio_scsi_scan_base *scan_ctx = svdev->scan_ctx;
	void *io[32];
	uint32_t io_len[32];
	uint16_t i, cnt;
	int rc;

	cnt = virtio_recv_pkts(ch->vq, (void **)io, io_len, SPDK_COUNTOF(io));
	for (i = 0; i < cnt; ++i) {
		if (spdk_unlikely(scan_ctx && io[i] == &scan_ctx->io_ctx)) {
			if (svdev->removed) {
				_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
				return SPDK_POLLER_BUSY;
			}

			if (scan_ctx->restart) {
				scan_ctx->restart = false;
				scan_ctx->full_scan = true;
				_virtio_scsi_dev_scan_tgt(scan_ctx, 0);
				continue;
			}

			process_scan_resp(scan_ctx);
			continue;
		}

		bdev_virtio_io_cpl(io[i]);
	}

	if (spdk_unlikely(scan_ctx && scan_ctx->needs_resend)) {
		if (svdev->removed) {
			_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
			return SPDK_POLLER_BUSY;
		} else if (cnt == 0) {
			return SPDK_POLLER_IDLE;
		}

		rc = send_scan_io(scan_ctx);
		if (rc != 0) {
			assert(scan_ctx->retries > 0);
			scan_ctx->retries--;
			if (scan_ctx->retries == 0) {
				SPDK_ERRLOG("Target scan failed unrecoverably with rc = %d.\n", rc);
				_virtio_scsi_dev_scan_finish(scan_ctx, rc);
			}
		}
	}

	return cnt;
}

static void
bdev_virtio_tmf_cpl_cb(void *ctx)
{
	struct spdk_bdev_io *bdev_io = ctx;
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;

	if (io_ctx->tmf_resp.response == VIRTIO_SCSI_S_OK) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static void
bdev_virtio_tmf_cpl(struct spdk_bdev_io *bdev_io)
{
	spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io), bdev_virtio_tmf_cpl_cb, bdev_io);
}

static void
bdev_virtio_eventq_io_cpl(struct virtio_scsi_dev *svdev, struct virtio_scsi_eventq_io *io)
{
	struct virtio_scsi_event *ev = &io->ev;
	struct virtio_scsi_disk *disk;

	if (ev->lun[0] != 1) {
		SPDK_WARNLOG("Received an event with invalid data layout.\n");
		goto out;
	}

	if (ev->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
		ev->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
		virtio_scsi_dev_scan(svdev, NULL, NULL);
	}

	switch (ev->event) {
	case VIRTIO_SCSI_T_NO_EVENT:
		break;
	case VIRTIO_SCSI_T_TRANSPORT_RESET:
		switch (ev->reason) {
		case VIRTIO_SCSI_EVT_RESET_RESCAN:
			virtio_scsi_dev_scan_tgt(svdev, ev->lun[1]);
			break;
		case VIRTIO_SCSI_EVT_RESET_REMOVED:
			disk = virtio_scsi_dev_get_disk_by_id(svdev, ev->lun[1]);
			if (disk != NULL) {
				spdk_bdev_unregister(&disk->bdev, NULL, NULL);
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

out:
	virtio_scsi_dev_send_eventq_io(svdev->vdev.vqs[VIRTIO_SCSI_EVENTQ], io);
}

static void
bdev_virtio_tmf_abort_nomem_cb(void *ctx)
{
	struct spdk_bdev_io *bdev_io = ctx;

	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
}

static void
bdev_virtio_tmf_abort_ioerr_cb(void *ctx)
{
	struct spdk_bdev_io *bdev_io = ctx;

	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
}

static void
bdev_virtio_tmf_abort(struct spdk_bdev_io *bdev_io, int status)
{
	spdk_msg_fn fn;

	if (status == -ENOMEM) {
		fn = bdev_virtio_tmf_abort_nomem_cb;
	} else {
		fn = bdev_virtio_tmf_abort_ioerr_cb;
	}

	spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io), fn, bdev_io);
}

static int
bdev_virtio_send_tmf_io(struct virtqueue *ctrlq, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
	int rc;

	rc = virtqueue_req_start(ctrlq, bdev_io, 2);
	if (rc != 0) {
		return rc;
	}

	virtqueue_req_add_iovs(ctrlq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	virtqueue_req_add_iovs(ctrlq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);

	virtqueue_req_flush(ctrlq);
	return 0;
}

static int
bdev_virtio_mgmt_poll(void *arg)
{
	struct virtio_scsi_dev *svdev = arg;
	struct virtio_dev *vdev = &svdev->vdev;
	struct virtqueue *eventq = vdev->vqs[VIRTIO_SCSI_EVENTQ];
	struct virtqueue *ctrlq = vdev->vqs[VIRTIO_SCSI_CONTROLQ];
	struct spdk_ring *send_ring = svdev->ctrlq_ring;
	void *io[16];
	uint32_t io_len[16];
	uint16_t i, cnt;
	int rc;
	int total = 0;

	cnt = spdk_ring_dequeue(send_ring, io, SPDK_COUNTOF(io));
	total += cnt;
	for (i = 0; i < cnt; ++i) {
		rc = bdev_virtio_send_tmf_io(ctrlq, io[i]);
		if (rc != 0) {
			bdev_virtio_tmf_abort(io[i], rc);
		}
	}

	cnt = virtio_recv_pkts(ctrlq, io, io_len, SPDK_COUNTOF(io));
	total += cnt;
	for (i = 0; i < cnt; ++i) {
		bdev_virtio_tmf_cpl(io[i]);
	}

	cnt = virtio_recv_pkts(eventq, io, io_len, SPDK_COUNTOF(io));
	total += cnt;
	for (i = 0; i < cnt; ++i) {
		bdev_virtio_eventq_io_cpl(svdev, io[i]);
	}

	return total;
}

static int
bdev_virtio_scsi_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct virtio_scsi_dev *svdev = io_device;
	struct virtio_dev *vdev = &svdev->vdev;
	struct bdev_virtio_io_channel *ch = ctx_buf;
	struct virtqueue *vq;
	int32_t queue_idx;

	queue_idx = virtio_dev_find_and_acquire_queue(vdev, VIRTIO_SCSI_REQUESTQ);
	if (queue_idx < 0) {
		SPDK_ERRLOG("Couldn't get an unused queue for the io_channel.\n");
		return -1;
	}

	vq = vdev->vqs[queue_idx];

	ch->svdev = svdev;
	ch->vq = vq;

	ch->poller = SPDK_POLLER_REGISTER(bdev_virtio_poll, ch, 0);

	return 0;
}

static void
bdev_virtio_scsi_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct bdev_virtio_io_channel *ch = ctx_buf;
	struct virtio_scsi_dev *svdev = ch->svdev;
	struct virtio_dev *vdev = &svdev->vdev;
	struct virtqueue *vq = ch->vq;

	spdk_poller_unregister(&ch->poller);
	virtio_dev_release_queue(vdev, vq->vq_queue_index);
}

static void
_virtio_scsi_dev_scan_finish(struct virtio_scsi_scan_base *base, int errnum)
{
	struct virtio_scsi_dev *svdev = base->svdev;
	size_t bdevs_cnt;
	struct spdk_bdev *bdevs[BDEV_VIRTIO_MAX_TARGET];
	struct virtio_scsi_disk *disk;
	struct virtio_scsi_scan_info *tgt, *next_tgt;

	spdk_put_io_channel(spdk_io_channel_from_ctx(base->channel));
	base->svdev->scan_ctx = NULL;

	TAILQ_FOREACH_SAFE(tgt, &base->scan_queue, tailq, next_tgt) {
		TAILQ_REMOVE(&base->scan_queue, tgt, tailq);
		free(tgt);
	}

	if (base->cb_fn == NULL) {
		spdk_free(base);
		return;
	}

	bdevs_cnt = 0;
	if (errnum == 0) {
		TAILQ_FOREACH(disk, &svdev->luns, link) {
			bdevs[bdevs_cnt] = &disk->bdev;
			bdevs_cnt++;
		}
	}

	base->cb_fn(base->cb_arg, errnum, bdevs, bdevs_cnt);
	spdk_free(base);
}

static int
send_scan_io(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_io_ctx *io_ctx = &base->io_ctx;
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtqueue *vq = base->channel->vq;
	int payload_iov_cnt = base->iov.iov_len > 0 ? 1 : 0;
	int rc;

	req->lun[0] = 1;
	req->lun[1] = base->info.target;

	rc = virtqueue_req_start(vq, io_ctx, 2 + payload_iov_cnt);
	if (rc != 0) {
		base->needs_resend = true;
		return -1;
	}

	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
	virtqueue_req_add_iovs(vq, &base->iov, payload_iov_cnt, SPDK_VIRTIO_DESC_WR);

	virtqueue_req_flush(vq);
	return 0;
}

static int
send_inquiry(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct spdk_scsi_cdb_inquiry *cdb;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = BDEV_VIRTIO_SCAN_PAYLOAD_SIZE;
	cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;
	cdb->opcode = SPDK_SPC_INQUIRY;
	to_be16(cdb->alloc_len, BDEV_VIRTIO_SCAN_PAYLOAD_SIZE);

	return send_scan_io(base);
}

static int
send_inquiry_vpd(struct virtio_scsi_scan_base *base, uint8_t page_code)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct spdk_scsi_cdb_inquiry *inquiry_cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = BDEV_VIRTIO_SCAN_PAYLOAD_SIZE;
	inquiry_cdb->opcode = SPDK_SPC_INQUIRY;
	inquiry_cdb->evpd = 1;
	inquiry_cdb->page_code = page_code;
	to_be16(inquiry_cdb->alloc_len, base->iov.iov_len);

	return send_scan_io(base);
}

static int
send_read_cap_10(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = 8;
	req->cdb[0] = SPDK_SBC_READ_CAPACITY_10;

	return send_scan_io(base);
}

static int
send_read_cap_16(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = 32;
	req->cdb[0] = SPDK_SPC_SERVICE_ACTION_IN_16;
	req->cdb[1] = SPDK_SBC_SAI_READ_CAPACITY_16;
	to_be32(&req->cdb[10], base->iov.iov_len);

	return send_scan_io(base);
}

static int
send_test_unit_ready(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));
	req->cdb[0] = SPDK_SPC_TEST_UNIT_READY;
	base->iov.iov_len = 0;

	return send_scan_io(base);
}

static int
send_start_stop_unit(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));
	req->cdb[0] = SPDK_SBC_START_STOP_UNIT;
	req->cdb[4] = SPDK_SBC_START_STOP_UNIT_START_BIT;
	base->iov.iov_len = 0;

	return send_scan_io(base);
}

static int
process_scan_start_stop_unit(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;

	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		return send_inquiry_vpd(base, SPDK_SPC_VPD_SUPPORTED_VPD_PAGES);
	}

	return -1;
}

static int
process_scan_test_unit_ready(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	int sk, asc, ascq;

	get_scsi_status(resp, &sk, &asc, &ascq);

	/* check response, get VPD if spun up otherwise send SSU */
	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		return send_inquiry_vpd(base, SPDK_SPC_VPD_SUPPORTED_VPD_PAGES);
	} else if (resp->response == VIRTIO_SCSI_S_OK &&
		   resp->status == SPDK_SCSI_STATUS_CHECK_CONDITION &&
		   sk == SPDK_SCSI_SENSE_UNIT_ATTENTION &&
		   asc == SPDK_SCSI_ASC_LOGICAL_UNIT_NOT_READY) {
		return send_start_stop_unit(base);
	} else {
		return -1;
	}
}

static int
process_scan_inquiry_standard(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	struct spdk_scsi_cdb_inquiry_data *inquiry_data =
		(struct spdk_scsi_cdb_inquiry_data *)base->payload;

	if (resp->status != SPDK_SCSI_STATUS_GOOD) {
		return -1;
	}

	/* check to make sure it's a supported device */
	if (inquiry_data->peripheral_device_type != SPDK_SPC_PERIPHERAL_DEVICE_TYPE_DISK ||
	    inquiry_data->peripheral_qualifier != SPDK_SPC_PERIPHERAL_QUALIFIER_CONNECTED) {
		SPDK_WARNLOG("Unsupported peripheral device type 0x%02x (qualifier 0x%02x)\n",
			     inquiry_data->peripheral_device_type,
			     inquiry_data->peripheral_qualifier);
		return -1;
	}

	return send_test_unit_ready(base);
}

static int
process_scan_inquiry_vpd_supported_vpd_pages(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	bool block_provisioning_page_supported = false;

	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		const uint8_t *vpd_data = base->payload;
		const uint8_t *supported_vpd_pages = vpd_data + 4;
		uint16_t page_length;
		uint16_t num_supported_pages;
		uint16_t i;

		page_length = from_be16(vpd_data + 2);
		num_supported_pages = spdk_min(page_length, base->iov.iov_len - 4);

		for (i = 0; i < num_supported_pages; i++) {
			if (supported_vpd_pages[i] == SPDK_SPC_VPD_BLOCK_THIN_PROVISION) {
				block_provisioning_page_supported = true;
				break;
			}
		}
	}

	if (block_provisioning_page_supported) {
		return send_inquiry_vpd(base, SPDK_SPC_VPD_BLOCK_THIN_PROVISION);
	} else {
		return send_read_cap_10(base);
	}
}

static int
process_scan_inquiry_vpd_block_thin_provision(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;

	base->info.unmap_supported = false;

	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		uint8_t *vpd_data = base->payload;

		base->info.unmap_supported = !!(vpd_data[5] & SPDK_SCSI_UNMAP_LBPU);
	}

	SPDK_INFOLOG(virtio, "Target %u: unmap supported = %d\n",
		     base->info.target, (int)base->info.unmap_supported);

	return send_read_cap_10(base);
}

static int
process_scan_inquiry(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct spdk_scsi_cdb_inquiry *inquiry_cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;

	if ((inquiry_cdb->evpd & 1) == 0) {
		return process_scan_inquiry_standard(base);
	}

	switch (inquiry_cdb->page_code) {
	case SPDK_SPC_VPD_SUPPORTED_VPD_PAGES:
		return process_scan_inquiry_vpd_supported_vpd_pages(base);
	case SPDK_SPC_VPD_BLOCK_THIN_PROVISION:
		return process_scan_inquiry_vpd_block_thin_provision(base);
	default:
		SPDK_DEBUGLOG(virtio, "Unexpected VPD page 0x%02x\n", inquiry_cdb->page_code);
		return -1;
	}
}

static void
bdev_virtio_disk_notify_remove(struct virtio_scsi_disk *disk)
{
	disk->removed = true;
	spdk_bdev_close(disk->notify_desc);
}

static void
bdev_virtio_disk_notify_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				 void *event_ctx)
{
	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		bdev_virtio_disk_notify_remove(event_ctx);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

/* To be called only from the thread performing target scan */
static int
virtio_scsi_dev_add_tgt(struct virtio_scsi_dev *svdev, struct virtio_scsi_scan_info *info)
{
	struct virtio_scsi_disk *disk;
	struct spdk_bdev *bdev;
	int rc;

	TAILQ_FOREACH(disk, &svdev->luns, link) {
		if (disk->info.target == info->target) {
			/* Target is already attached and param change is not supported */
			return 0;
		}
	}

	if (info->block_size == 0 || info->num_blocks == 0) {
		SPDK_ERRLOG("%s: invalid target %u: bs=%"PRIu32" blocks=%"PRIu64"\n",
			    svdev->vdev.name, info->target, info->block_size, info->num_blocks);
		return -EINVAL;
	}

	disk = calloc(1, sizeof(*disk));
	if (disk == NULL) {
		SPDK_ERRLOG("could not allocate disk\n");
		return -ENOMEM;
	}

	disk->svdev = svdev;
	memcpy(&disk->info, info, sizeof(*info));

	bdev = &disk->bdev;
	bdev->name = spdk_sprintf_alloc("%st%"PRIu8, svdev->vdev.name, info->target);
	if (bdev->name == NULL) {
		SPDK_ERRLOG("Couldn't alloc memory for the bdev name.\n");
		free(disk);
		return -ENOMEM;
	}

	bdev->product_name = "Virtio SCSI Disk";
	bdev->write_cache = 0;
	bdev->blocklen = disk->info.block_size;
	bdev->blockcnt = disk->info.num_blocks;

	bdev->ctxt = disk;
	bdev->fn_table = &virtio_fn_table;
	bdev->module = &virtio_scsi_if;

	rc = spdk_bdev_register(&disk->bdev);
	if (rc) {
		SPDK_ERRLOG("Failed to register bdev name=%s\n", disk->bdev.name);
		free(bdev->name);
		free(disk);
		return rc;
	}

	rc = spdk_bdev_open_ext(bdev->name, false, bdev_virtio_disk_notify_event_cb,
				disk, &disk->notify_desc);
	if (rc) {
		assert(false);
	}

	TAILQ_INSERT_TAIL(&svdev->luns, disk, link);
	return 0;
}

static int
process_read_cap_10(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	uint64_t max_block;
	uint32_t block_size;
	uint8_t target_id = req->lun[1];
	int rc;

	if (resp->response != VIRTIO_SCSI_S_OK || resp->status != SPDK_SCSI_STATUS_GOOD) {
		SPDK_ERRLOG("READ CAPACITY (10) failed for target %"PRIu8".\n", target_id);
		return -1;
	}

	block_size = from_be32(base->payload + 4);
	max_block = from_be32(base->payload);

	if (max_block == 0xffffffff) {
		return send_read_cap_16(base);
	}

	base->info.num_blocks = (uint64_t)max_block + 1;
	base->info.block_size = block_size;

	rc = virtio_scsi_dev_add_tgt(base->svdev, &base->info);
	if (rc != 0) {
		return rc;
	}

	return _virtio_scsi_dev_scan_next(base, 0);
}

static int
process_read_cap_16(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	uint8_t target_id = req->lun[1];
	int rc;

	if (resp->response != VIRTIO_SCSI_S_OK || resp->status != SPDK_SCSI_STATUS_GOOD) {
		SPDK_ERRLOG("READ CAPACITY (16) failed for target %"PRIu8".\n", target_id);
		return -1;
	}

	base->info.num_blocks = from_be64(base->payload) + 1;
	base->info.block_size = from_be32(base->payload + 8);
	rc = virtio_scsi_dev_add_tgt(base->svdev, &base->info);
	if (rc != 0) {
		return rc;
	}

	return _virtio_scsi_dev_scan_next(base, 0);
}

static void
process_scan_resp(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	int rc, sk, asc, ascq;
	uint8_t target_id;

	if (base->io_ctx.iov_req.iov_len < sizeof(struct virtio_scsi_cmd_req) ||
	    base->io_ctx.iov_resp.iov_len < sizeof(struct virtio_scsi_cmd_resp)) {
		SPDK_ERRLOG("Received target scan message with invalid length.\n");
		_virtio_scsi_dev_scan_next(base, -EIO);
		return;
	}

	get_scsi_status(resp, &sk, &asc, &ascq);
	target_id = req->lun[1];

	if (resp->response == VIRTIO_SCSI_S_BAD_TARGET ||
	    resp->response == VIRTIO_SCSI_S_INCORRECT_LUN) {
		_virtio_scsi_dev_scan_next(base, -ENODEV);
		return;
	}

	if (resp->response != VIRTIO_SCSI_S_OK ||
	    (resp->status == SPDK_SCSI_STATUS_CHECK_CONDITION &&
	     sk != SPDK_SCSI_SENSE_ILLEGAL_REQUEST)) {
		assert(base->retries > 0);
		base->retries--;
		if (base->retries == 0) {
			SPDK_NOTICELOG("Target %"PRIu8" is present, but unavailable.\n", target_id);
			SPDK_LOGDUMP(virtio, "CDB", req->cdb, sizeof(req->cdb));
			SPDK_LOGDUMP(virtio, "SENSE DATA", resp->sense, sizeof(resp->sense));
			_virtio_scsi_dev_scan_next(base, -EBUSY);
			return;
		}

		/* resend the same request */
		rc = send_scan_io(base);
		if (rc != 0) {
			/* Let response poller do the resend */
		}
		return;
	}

	base->retries = SCAN_REQUEST_RETRIES;

	switch (req->cdb[0]) {
	case SPDK_SPC_INQUIRY:
		rc = process_scan_inquiry(base);
		break;
	case SPDK_SPC_TEST_UNIT_READY:
		rc = process_scan_test_unit_ready(base);
		break;
	case SPDK_SBC_START_STOP_UNIT:
		rc = process_scan_start_stop_unit(base);
		break;
	case SPDK_SBC_READ_CAPACITY_10:
		rc = process_read_cap_10(base);
		break;
	case SPDK_SPC_SERVICE_ACTION_IN_16:
		rc = process_read_cap_16(base);
		break;
	default:
		SPDK_ERRLOG("Received invalid target scan message: cdb[0] = %"PRIu8".\n", req->cdb[0]);
		rc = -1;
		break;
	}

	if (rc != 0) {
		if (base->needs_resend) {
			return; /* Let response poller do the resend */
		}

		_virtio_scsi_dev_scan_next(base, rc);
	}
}

static int
_virtio_scsi_dev_scan_next(struct virtio_scsi_scan_base *base, int rc)
{
	struct virtio_scsi_scan_info *next;
	struct virtio_scsi_disk *disk;
	uint8_t target_id;

	if (base->full_scan) {
		if (rc != 0) {
			disk = virtio_scsi_dev_get_disk_by_id(base->svdev,
							      base->info.target);
			if (disk != NULL) {
				spdk_bdev_unregister(&disk->bdev, NULL, NULL);
			}
		}

		target_id = base->info.target + 1;
		if (target_id < BDEV_VIRTIO_MAX_TARGET) {
			_virtio_scsi_dev_scan_tgt(base, target_id);
			return 0;
		}

		base->full_scan = false;
	}

	next = TAILQ_FIRST(&base->scan_queue);
	if (next == NULL) {
		_virtio_scsi_dev_scan_finish(base, 0);
		return 0;
	}

	TAILQ_REMOVE(&base->scan_queue, next, tailq);
	target_id = next->target;
	free(next);

	_virtio_scsi_dev_scan_tgt(base, target_id);
	return 0;
}

static int
_virtio_scsi_dev_scan_init(struct virtio_scsi_dev *svdev)
{
	struct virtio_scsi_scan_base *base;
	struct spdk_io_channel *io_ch;
	struct virtio_scsi_io_ctx *io_ctx;
	struct virtio_scsi_cmd_req *req;
	struct virtio_scsi_cmd_resp *resp;

	io_ch = spdk_get_io_channel(svdev);
	if (io_ch == NULL) {
		return -EBUSY;
	}

	base = spdk_zmalloc(sizeof(*base), 64, NULL,
			    SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (base == NULL) {
		SPDK_ERRLOG("couldn't allocate memory for scsi target scan.\n");
		return -ENOMEM;
	}

	base->svdev = svdev;

	base->channel = spdk_io_channel_get_ctx(io_ch);
	TAILQ_INIT(&base->scan_queue);
	svdev->scan_ctx = base;

	base->iov.iov_base = base->payload;
	io_ctx = &base->io_ctx;
	req = &io_ctx->req;
	resp = &io_ctx->resp;
	io_ctx->iov_req.iov_base = req;
	io_ctx->iov_req.iov_len = sizeof(*req);
	io_ctx->iov_resp.iov_base = resp;
	io_ctx->iov_resp.iov_len = sizeof(*resp);

	base->retries = SCAN_REQUEST_RETRIES;
	return 0;
}

static void
_virtio_scsi_dev_scan_tgt(struct virtio_scsi_scan_base *base, uint8_t target)
{
	int rc;

	memset(&base->info, 0, sizeof(base->info));
	base->info.target = target;

	rc = send_inquiry(base);
	if (rc) {
		/* Let response poller do the resend */
	}
}

static int
virtio_scsi_dev_scan(struct virtio_scsi_dev *svdev, bdev_virtio_create_cb cb_fn,
		     void *cb_arg)
{
	struct virtio_scsi_scan_base *base;
	struct virtio_scsi_scan_info *tgt, *next_tgt;
	int rc;

	if (svdev->scan_ctx) {
		if (svdev->scan_ctx->full_scan) {
			return -EEXIST;
		}

		/* We're about to start a full rescan, so there's no need
		 * to scan particular targets afterwards.
		 */
		TAILQ_FOREACH_SAFE(tgt, &svdev->scan_ctx->scan_queue, tailq, next_tgt) {
			TAILQ_REMOVE(&svdev->scan_ctx->scan_queue, tgt, tailq);
			free(tgt);
		}

		svdev->scan_ctx->cb_fn = cb_fn;
		svdev->scan_ctx->cb_arg = cb_arg;
		svdev->scan_ctx->restart = true;
		return 0;
	}

	rc = _virtio_scsi_dev_scan_init(svdev);
	if (rc != 0) {
		return rc;
	}

	base = svdev->scan_ctx;
	base->cb_fn = cb_fn;
	base->cb_arg = cb_arg;
	base->full_scan = true;

	_virtio_scsi_dev_scan_tgt(base, 0);
	return 0;
}

static int
virtio_scsi_dev_scan_tgt(struct virtio_scsi_dev *svdev, uint8_t target)
{
	struct virtio_scsi_scan_base *base;
	struct virtio_scsi_scan_info *info;
	int rc;

	base = svdev->scan_ctx;
	if (base) {
		info = calloc(1, sizeof(*info));
		if (info == NULL) {
			SPDK_ERRLOG("calloc failed\n");
			return -ENOMEM;
		}

		info->target = target;
		TAILQ_INSERT_TAIL(&base->scan_queue, info, tailq);
		return 0;
	}

	rc = _virtio_scsi_dev_scan_init(svdev);
	if (rc != 0) {
		return rc;
	}

	base = svdev->scan_ctx;
	base->full_scan = true;
	_virtio_scsi_dev_scan_tgt(base, target);
	return 0;
}

static int
bdev_virtio_initialize(void)
{
	return 0;
}

static void
_virtio_scsi_dev_unregister_cb(void *io_device)
{
	struct virtio_scsi_dev *svdev = io_device;
	struct virtio_dev *vdev = &svdev->vdev;
	bool finish_module;
	bdev_virtio_remove_cb remove_cb;
	void *remove_ctx;

	assert(spdk_ring_count(svdev->ctrlq_ring) == 0);
	spdk_ring_free(svdev->ctrlq_ring);
	spdk_poller_unregister(&svdev->mgmt_poller);

	virtio_dev_release_queue(vdev, VIRTIO_SCSI_EVENTQ);
	virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);

	virtio_dev_stop(vdev);
	virtio_dev_destruct(vdev);

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_REMOVE(&g_virtio_scsi_devs, svdev, tailq);
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	remove_cb = svdev->remove_cb;
	remove_ctx = svdev->remove_ctx;
	spdk_free(svdev->eventq_ios);
	free(svdev);

	if (remove_cb) {
		remove_cb(remove_ctx, 0);
	}

	finish_module = TAILQ_EMPTY(&g_virtio_scsi_devs);

	if (g_bdev_virtio_finish && finish_module) {
		spdk_bdev_module_finish_done();
	}
}

static void
virtio_scsi_dev_unregister_cb(void *io_device)
{
	struct virtio_scsi_dev *svdev = io_device;
	struct spdk_thread *thread;

	thread = virtio_dev_queue_get_thread(&svdev->vdev, VIRTIO_SCSI_CONTROLQ);
	spdk_thread_send_msg(thread, _virtio_scsi_dev_unregister_cb, io_device);
}

static void
virtio_scsi_dev_remove(struct virtio_scsi_dev *svdev,
		       bdev_virtio_remove_cb cb_fn, void *cb_arg)
{
	struct virtio_scsi_disk *disk, *disk_tmp;
	bool do_remove = true;

	if (svdev->removed) {
		if (cb_fn) {
			cb_fn(cb_arg, -EBUSY);
		}
		return;
	}

	svdev->remove_cb = cb_fn;
	svdev->remove_ctx = cb_arg;
	svdev->removed = true;

	if (svdev->scan_ctx) {
		/* The removal will continue after we receive a pending scan I/O. */
		return;
	}

	TAILQ_FOREACH_SAFE(disk, &svdev->luns, link, disk_tmp) {
		if (!disk->removed) {
			spdk_bdev_unregister(&disk->bdev, NULL, NULL);
		}
		do_remove = false;
	}

	if (do_remove) {
		spdk_io_device_unregister(svdev, virtio_scsi_dev_unregister_cb);
	}
}

static void
bdev_virtio_finish(void)
{
	struct virtio_scsi_dev *svdev, *next;

	g_bdev_virtio_finish = true;

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	if (TAILQ_EMPTY(&g_virtio_scsi_devs)) {
		pthread_mutex_unlock(&g_virtio_scsi_mutex);
		spdk_bdev_module_finish_done();
		return;
	}

	/* Defer module finish until all controllers are removed. */
	TAILQ_FOREACH_SAFE(svdev, &g_virtio_scsi_devs, tailq, next) {
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}
	pthread_mutex_unlock(&g_virtio_scsi_mutex);
}

int
bdev_virtio_user_scsi_dev_create(const char *base_name, const char *path,
				 unsigned num_queues, unsigned queue_size,
				 bdev_virtio_create_cb cb_fn, void *cb_arg)
{
	struct virtio_scsi_dev *svdev;
	int rc;

	svdev = virtio_user_scsi_dev_create(base_name, path, num_queues, queue_size);
	if (svdev == NULL) {
		return -1;
	}

	rc = virtio_scsi_dev_scan(svdev, cb_fn, cb_arg);
	if (rc) {
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}

	return rc;
}

struct bdev_virtio_pci_dev_create_ctx {
	const char *name;
	bdev_virtio_create_cb cb_fn;
	void *cb_arg;
};

static int
bdev_virtio_pci_scsi_dev_create_cb(struct virtio_pci_ctx *pci_ctx, void *ctx)
{
	struct virtio_scsi_dev *svdev;
	struct bdev_virtio_pci_dev_create_ctx *create_ctx = ctx;
	int rc;

	svdev = virtio_pci_scsi_dev_create(create_ctx->name, pci_ctx);
	if (svdev == NULL) {
		return -1;
	}

	rc = virtio_scsi_dev_scan(svdev, create_ctx->cb_fn, create_ctx->cb_arg);
	if (rc) {
		svdev->vdev.ctx = NULL;
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}

	return rc;
}

int
bdev_virtio_pci_scsi_dev_create(const char *name, struct spdk_pci_addr *pci_addr,
				bdev_virtio_create_cb cb_fn, void *cb_arg)
{
	struct bdev_virtio_pci_dev_create_ctx create_ctx;

	create_ctx.name = name;
	create_ctx.cb_fn = cb_fn;
	create_ctx.cb_arg = cb_arg;

	return virtio_pci_dev_attach(bdev_virtio_pci_scsi_dev_create_cb, &create_ctx,
				     PCI_DEVICE_ID_VIRTIO_SCSI_MODERN, pci_addr);
}

int
bdev_virtio_scsi_dev_remove(const char *name, bdev_virtio_remove_cb cb_fn, void *cb_arg)
{
	struct virtio_scsi_dev *svdev;

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		if (strcmp(svdev->vdev.name, name) == 0) {
			break;
		}
	}

	if (svdev == NULL) {
		pthread_mutex_unlock(&g_virtio_scsi_mutex);
		SPDK_ERRLOG("Cannot find Virtio-SCSI device named '%s'\n", name);
		return -ENODEV;
	}

	virtio_scsi_dev_remove(svdev, cb_fn, cb_arg);
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	return 0;
}

void
bdev_virtio_scsi_dev_list(struct spdk_json_write_ctx *w)
{
	struct virtio_scsi_dev *svdev;

	spdk_json_write_array_begin(w);

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		spdk_json_write_object_begin(w);

		spdk_json_write_named_string(w, "name", svdev->vdev.name);

		virtio_dev_dump_json_info(&svdev->vdev, w);

		spdk_json_write_object_end(w);
	}
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	spdk_json_write_array_end(w);
}

SPDK_LOG_REGISTER_COMPONENT(virtio)