/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/endian.h"
#include "spdk/env.h"
#include "spdk/thread.h"
#include "spdk/scsi_spec.h"
#include "spdk/string.h"
#include "spdk/util.h"
#include "spdk/json.h"

#include "spdk/bdev_module.h"
#include "spdk/log.h"
#include "spdk_internal/virtio.h"
#include "spdk_internal/vhost_user.h"

#include <linux/virtio_scsi.h>
#include <linux/virtio_ids.h>

#include "bdev_virtio.h"

#define BDEV_VIRTIO_MAX_TARGET		64
#define BDEV_VIRTIO_SCAN_PAYLOAD_SIZE	256
#define MGMT_POLL_PERIOD_US		(1000 * 5)
#define CTRLQ_RING_SIZE			16
#define SCAN_REQUEST_RETRIES		5

/* Number of non-request queues - eventq and controlq */
#define SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED	2

#define VIRTIO_SCSI_EVENTQ_BUFFER_COUNT		16

#define VIRTIO_SCSI_CONTROLQ	0
#define VIRTIO_SCSI_EVENTQ	1
#define VIRTIO_SCSI_REQUESTQ	2

static int bdev_virtio_initialize(void);
static void bdev_virtio_finish(void);

struct virtio_scsi_dev {
	/* Generic virtio device data. */
	struct virtio_dev vdev;

	/** Detected SCSI LUNs */
	TAILQ_HEAD(, virtio_scsi_disk) luns;

	/** Context for the SCSI target scan. */
	struct virtio_scsi_scan_base *scan_ctx;

	/** Controlq poller. */
	struct spdk_poller *mgmt_poller;

	/** Controlq messages to be sent. */
	struct spdk_ring *ctrlq_ring;

	/** Buffers for the eventq. */
	struct virtio_scsi_eventq_io *eventq_ios;

	/** Device marked for removal. */
	bool removed;

	/** Callback to be called after vdev removal. */
	bdev_virtio_remove_cb remove_cb;

	/** Context for the `remove_cb`. */
	void *remove_ctx;

	TAILQ_ENTRY(virtio_scsi_dev) tailq;
};

struct virtio_scsi_io_ctx {
	struct iovec iov_req;
	struct iovec iov_resp;
	union {
		struct virtio_scsi_cmd_req req;
		struct virtio_scsi_ctrl_tmf_req tmf_req;
	};
	union {
		struct virtio_scsi_cmd_resp resp;
		struct virtio_scsi_ctrl_tmf_resp tmf_resp;
	};
};

struct virtio_scsi_eventq_io {
	struct iovec iov;
	struct virtio_scsi_event ev;
};

struct virtio_scsi_scan_info {
	uint64_t num_blocks;
	uint32_t block_size;
	uint8_t target;
	bool unmap_supported;
	TAILQ_ENTRY(virtio_scsi_scan_info) tailq;
};

struct virtio_scsi_scan_base {
	struct virtio_scsi_dev *svdev;

	/** I/O channel used for the scan I/O. */
	struct bdev_virtio_io_channel *channel;

	bdev_virtio_create_cb cb_fn;
	void *cb_arg;

	/** Scan all targets on the device. */
	bool full_scan;

	/** Start a full rescan after receiving next scan I/O response. */
	bool restart;

	/** Additional targets to be (re)scanned. */
	TAILQ_HEAD(, virtio_scsi_scan_info) scan_queue;

	/** Remaining attempts for sending the current request. */
	unsigned retries;

	/** If set, the last scan I/O needs to be resent */
	bool needs_resend;

	struct virtio_scsi_io_ctx io_ctx;
	struct iovec iov;
	uint8_t payload[BDEV_VIRTIO_SCAN_PAYLOAD_SIZE];

	/** Scan results for the current target. */
	struct virtio_scsi_scan_info info;
};

struct virtio_scsi_disk {
	struct spdk_bdev bdev;
	struct virtio_scsi_dev *svdev;
	struct virtio_scsi_scan_info info;

	/** Descriptor opened just to be notified of external bdev hotremove. */
	struct spdk_bdev_desc *notify_desc;

	/** Disk marked for removal. */
	bool removed;
	TAILQ_ENTRY(virtio_scsi_disk) link;
};

struct bdev_virtio_io_channel {
	struct virtio_scsi_dev *svdev;

	/** Virtqueue exclusively assigned to this channel. */
	struct virtqueue *vq;

	/** Virtio response poller. */
	struct spdk_poller *poller;
};

static TAILQ_HEAD(, virtio_scsi_dev) g_virtio_scsi_devs =
	TAILQ_HEAD_INITIALIZER(g_virtio_scsi_devs);

static pthread_mutex_t g_virtio_scsi_mutex = PTHREAD_MUTEX_INITIALIZER;

/** Module finish in progress */
static bool g_bdev_virtio_finish = false;

/* Features desired/implemented by this driver. */
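/*
 * VIRTIO_SCSI_F_INOUT - a single request may carry both device-readable and
 * device-writable payload buffers.
 * VIRTIO_SCSI_F_HOTPLUG - the device reports target hotplug/hot-remove events
 * on the eventq.
 * VIRTIO_RING_F_EVENT_IDX - used/avail event index based notification
 * suppression.
 * VHOST_USER_F_PROTOCOL_FEATURES - vhost-user protocol feature negotiation
 * (relevant only for the vhost-user transport).
 */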
#define VIRTIO_SCSI_DEV_SUPPORTED_FEATURES		\
	(1ULL << VIRTIO_SCSI_F_INOUT		|	\
	 1ULL << VIRTIO_SCSI_F_HOTPLUG		|	\
	 1ULL << VIRTIO_RING_F_EVENT_IDX	|	\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

static void virtio_scsi_dev_unregister_cb(void *io_device);
static void virtio_scsi_dev_remove(struct virtio_scsi_dev *svdev,
				   bdev_virtio_remove_cb cb_fn, void *cb_arg);
static int bdev_virtio_scsi_ch_create_cb(void *io_device, void *ctx_buf);
static void bdev_virtio_scsi_ch_destroy_cb(void *io_device, void *ctx_buf);
static void process_scan_resp(struct virtio_scsi_scan_base *base);
static int bdev_virtio_mgmt_poll(void *arg);

static int
virtio_scsi_dev_send_eventq_io(struct virtqueue *vq, struct virtio_scsi_eventq_io *io)
{
	int rc;

	rc = virtqueue_req_start(vq, io, 1);
	if (rc != 0) {
		return -1;
	}

	virtqueue_req_add_iovs(vq, &io->iov, 1, SPDK_VIRTIO_DESC_WR);
	virtqueue_req_flush(vq);

	return 0;
}

static int
virtio_scsi_dev_init(struct virtio_scsi_dev *svdev, uint16_t max_queues)
{
	struct virtio_dev *vdev = &svdev->vdev;
	struct spdk_ring *ctrlq_ring;
	struct virtio_scsi_eventq_io *eventq_io;
	struct virtqueue *eventq;
	uint16_t i, num_events;
	int rc;

	rc = virtio_dev_reset(vdev, VIRTIO_SCSI_DEV_SUPPORTED_FEATURES);
	if (rc != 0) {
		return rc;
	}

	rc = virtio_dev_start(vdev, max_queues, SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED);
	if (rc != 0) {
		return rc;
	}

	ctrlq_ring = spdk_ring_create(SPDK_RING_TYPE_MP_SC, CTRLQ_RING_SIZE,
				      SPDK_ENV_SOCKET_ID_ANY);
	if (ctrlq_ring == NULL) {
		SPDK_ERRLOG("Failed to allocate send ring for the controlq.\n");
		return -1;
	}

	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_CONTROLQ);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to acquire the controlq.\n");
		spdk_ring_free(ctrlq_ring);
		return -1;
	}

	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_EVENTQ);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to acquire the eventq.\n");
		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
		spdk_ring_free(ctrlq_ring);
		return -1;
	}

	eventq = vdev->vqs[VIRTIO_SCSI_EVENTQ];
	num_events = spdk_min(eventq->vq_nentries, VIRTIO_SCSI_EVENTQ_BUFFER_COUNT);
	svdev->eventq_ios = spdk_zmalloc(sizeof(*svdev->eventq_ios) * num_events,
					 0, NULL, SPDK_ENV_LCORE_ID_ANY,
					 SPDK_MALLOC_DMA);
	if (svdev->eventq_ios == NULL) {
		SPDK_ERRLOG("cannot allocate memory for %"PRIu16" eventq buffers\n",
			    num_events);
		virtio_dev_release_queue(vdev, VIRTIO_SCSI_EVENTQ);
		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
		spdk_ring_free(ctrlq_ring);
		return -1;
	}

	for (i = 0; i < num_events; i++) {
		eventq_io = &svdev->eventq_ios[i];
		eventq_io->iov.iov_base = &eventq_io->ev;
		eventq_io->iov.iov_len = sizeof(eventq_io->ev);
		virtio_scsi_dev_send_eventq_io(eventq, eventq_io);
	}

	svdev->ctrlq_ring = ctrlq_ring;

	svdev->mgmt_poller = SPDK_POLLER_REGISTER(bdev_virtio_mgmt_poll, svdev,
			     MGMT_POLL_PERIOD_US);

	TAILQ_INIT(&svdev->luns);
	svdev->scan_ctx = NULL;
	svdev->removed = false;
	svdev->remove_cb = NULL;
	svdev->remove_ctx = NULL;

	spdk_io_device_register(svdev, bdev_virtio_scsi_ch_create_cb,
				bdev_virtio_scsi_ch_destroy_cb,
				sizeof(struct bdev_virtio_io_channel),
				svdev->vdev.name);

	pthread_mutex_lock(&g_virtio_scsi_mutex);
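	/* Publish the fully initialized device on the global list so that RPCs
	 * (config dump, list, remove) can find it.
	 */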
	TAILQ_INSERT_TAIL(&g_virtio_scsi_devs, svdev, tailq);
	pthread_mutex_unlock(&g_virtio_scsi_mutex);
	return 0;
}

static struct virtio_scsi_dev *
virtio_pci_scsi_dev_create(const char *name, struct virtio_pci_ctx *pci_ctx)
{
	static int pci_dev_counter = 0;
	struct virtio_scsi_dev *svdev;
	struct virtio_dev *vdev;
	char *default_name = NULL;
	uint32_t num_queues;
	int rc;

	svdev = calloc(1, sizeof(*svdev));
	if (svdev == NULL) {
		SPDK_ERRLOG("virtio device calloc failed\n");
		return NULL;
	}

	vdev = &svdev->vdev;
	if (name == NULL) {
		default_name = spdk_sprintf_alloc("VirtioScsi%"PRIu32, pci_dev_counter++);
		if (default_name == NULL) {
			free(svdev);
			return NULL;
		}
		name = default_name;
	}

	rc = virtio_pci_dev_init(vdev, name, pci_ctx);
	free(default_name);

	if (rc != 0) {
		free(svdev);
		return NULL;
	}

	rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_scsi_config, num_queues),
					&num_queues, sizeof(num_queues));
	if (rc) {
		SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc));
		goto fail;
	}

	rc = virtio_scsi_dev_init(svdev, num_queues);
	if (rc != 0) {
		goto fail;
	}

	return svdev;

fail:
	vdev->ctx = NULL;
	virtio_dev_destruct(vdev);
	free(svdev);
	return NULL;
}

static struct virtio_scsi_dev *
virtio_user_scsi_dev_create(const char *name, const char *path,
			    uint16_t num_queues, uint32_t queue_size)
{
	struct virtio_scsi_dev *svdev;
	struct virtio_dev *vdev;
	int rc;

	svdev = calloc(1, sizeof(*svdev));
	if (svdev == NULL) {
		SPDK_ERRLOG("calloc failed for virtio device %s: %s\n", name, path);
		return NULL;
	}

	vdev = &svdev->vdev;
	rc = virtio_user_dev_init(vdev, name, path, queue_size);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create virtio device %s: %s\n", name, path);
		free(svdev);
		return NULL;
	}

	rc = virtio_scsi_dev_init(svdev, num_queues);
	if (rc != 0) {
		virtio_dev_destruct(vdev);
		free(svdev);
		return NULL;
	}

	return svdev;
}

static struct virtio_scsi_disk *
virtio_scsi_dev_get_disk_by_id(struct virtio_scsi_dev *svdev, uint8_t target_id)
{
	struct virtio_scsi_disk *disk;

	TAILQ_FOREACH(disk, &svdev->luns, link) {
		if (disk->info.target == target_id) {
			return disk;
		}
	}

	return NULL;
}

static int virtio_scsi_dev_scan(struct virtio_scsi_dev *svdev,
				bdev_virtio_create_cb cb_fn, void *cb_arg);
static int send_scan_io(struct virtio_scsi_scan_base *base);
static void _virtio_scsi_dev_scan_tgt(struct virtio_scsi_scan_base *base, uint8_t target);
static int _virtio_scsi_dev_scan_next(struct virtio_scsi_scan_base *base, int rc);
static void _virtio_scsi_dev_scan_finish(struct virtio_scsi_scan_base *base, int errnum);
static int virtio_scsi_dev_scan_tgt(struct virtio_scsi_dev *svdev, uint8_t target);

static int
bdev_virtio_get_ctx_size(void)
{
	return sizeof(struct virtio_scsi_io_ctx);
}

static int
bdev_virtio_scsi_config_json(struct spdk_json_write_ctx *w)
{
	struct virtio_scsi_dev *svdev;

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		spdk_json_write_object_begin(w);

		spdk_json_write_named_string(w, "method", "bdev_virtio_attach_controller");

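		/* The "params" object mirrors the arguments of the
		 * bdev_virtio_attach_controller RPC, so replaying this output
		 * re-creates the controller.
		 */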
		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "name", svdev->vdev.name);
		spdk_json_write_named_string(w, "dev_type", "scsi");

		/* Write transport specific parameters. */
		svdev->vdev.backend_ops->write_json_config(&svdev->vdev, w);

		spdk_json_write_object_end(w);

		spdk_json_write_object_end(w);

	}
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	return 0;
}


static struct spdk_bdev_module virtio_scsi_if = {
	.name = "virtio_scsi",
	.module_init = bdev_virtio_initialize,
	.module_fini = bdev_virtio_finish,
	.get_ctx_size = bdev_virtio_get_ctx_size,
	.config_json = bdev_virtio_scsi_config_json,
	.async_fini = true,
};

SPDK_BDEV_MODULE_REGISTER(virtio_scsi, &virtio_scsi_if)

static struct virtio_scsi_io_ctx *
bdev_virtio_init_io_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_cmd_req *req;
	struct virtio_scsi_cmd_resp *resp;
	struct virtio_scsi_disk *disk = (struct virtio_scsi_disk *)bdev_io->bdev;
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;

	req = &io_ctx->req;
	resp = &io_ctx->resp;

	io_ctx->iov_req.iov_base = req;
	io_ctx->iov_req.iov_len = sizeof(*req);

	io_ctx->iov_resp.iov_base = resp;
	io_ctx->iov_resp.iov_len = sizeof(*resp);

	memset(req, 0, sizeof(*req));
	req->lun[0] = 1;
	req->lun[1] = disk->info.target;

	return io_ctx;
}

static struct virtio_scsi_io_ctx *
bdev_virtio_init_tmf_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;

	tmf_req = &io_ctx->tmf_req;
	tmf_resp = &io_ctx->tmf_resp;

	io_ctx->iov_req.iov_base = tmf_req;
	io_ctx->iov_req.iov_len = sizeof(*tmf_req);
	io_ctx->iov_resp.iov_base = tmf_resp;
	io_ctx->iov_resp.iov_len = sizeof(*tmf_resp);

	memset(tmf_req, 0, sizeof(*tmf_req));
	tmf_req->lun[0] = 1;
	tmf_req->lun[1] = disk->info.target;

	return io_ctx;
}

static void
bdev_virtio_send_io(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_virtio_io_channel *virtio_channel = spdk_io_channel_get_ctx(ch);
	struct virtqueue *vq = virtio_channel->vq;
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
	int rc;

	rc = virtqueue_req_start(vq, bdev_io, bdev_io->u.bdev.iovcnt + 2);
	if (rc == -ENOMEM) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
		return;
	} else if (rc != 0) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				       SPDK_VIRTIO_DESC_WR);
	} else {
		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				       SPDK_VIRTIO_DESC_RO);
		virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
	}

	virtqueue_req_flush(vq);
}

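/* Translate a bdev READ/WRITE into a SCSI READ(10)/WRITE(10) or, for LUNs
 * larger than 2^32 blocks, READ(16)/WRITE(16) CDB, then post it on the
 * channel's request virtqueue.
 */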
static void
bdev_virtio_rw(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_io_vreq(ch, bdev_io);
	struct virtio_scsi_cmd_req *req = &io_ctx->req;
	bool is_write = bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE;

	if (disk->info.num_blocks > (1ULL << 32)) {
		req->cdb[0] = is_write ? SPDK_SBC_WRITE_16 : SPDK_SBC_READ_16;
		to_be64(&req->cdb[2], bdev_io->u.bdev.offset_blocks);
		to_be32(&req->cdb[10], bdev_io->u.bdev.num_blocks);
	} else {
		req->cdb[0] = is_write ? SPDK_SBC_WRITE_10 : SPDK_SBC_READ_10;
		to_be32(&req->cdb[2], bdev_io->u.bdev.offset_blocks);
		to_be16(&req->cdb[7], bdev_io->u.bdev.num_blocks);
	}

	bdev_virtio_send_io(ch, bdev_io);
}

static void
bdev_virtio_reset(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_virtio_io_channel *virtio_ch = spdk_io_channel_get_ctx(ch);
	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_tmf_vreq(ch, bdev_io);
	struct virtio_scsi_ctrl_tmf_req *tmf_req = &io_ctx->tmf_req;
	struct virtio_scsi_dev *svdev = virtio_ch->svdev;
	size_t enqueued_count;

	tmf_req->type = VIRTIO_SCSI_T_TMF;
	tmf_req->subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;

	enqueued_count = spdk_ring_enqueue(svdev->ctrlq_ring, (void **)&bdev_io, 1, NULL);
	if (spdk_likely(enqueued_count == 1)) {
		return;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

static void
bdev_virtio_unmap(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
{
	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_io_vreq(ch, bdev_io);
	struct virtio_scsi_cmd_req *req = &io_ctx->req;
	struct spdk_scsi_unmap_bdesc *desc, *first_desc;
	uint8_t *buf;
	uint64_t offset_blocks, num_blocks;
	uint16_t cmd_len;

	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	buf = bdev_io->u.bdev.iovs[0].iov_base;

	offset_blocks = bdev_io->u.bdev.offset_blocks;
	num_blocks = bdev_io->u.bdev.num_blocks;

	/* (n-1) * 16-byte descriptors */
	first_desc = desc = (struct spdk_scsi_unmap_bdesc *)&buf[8];
	while (num_blocks > UINT32_MAX) {
		to_be64(&desc->lba, offset_blocks);
		to_be32(&desc->block_count, UINT32_MAX);
		memset(&desc->reserved, 0, sizeof(desc->reserved));
		offset_blocks += UINT32_MAX;
		num_blocks -= UINT32_MAX;
		desc++;
	}

	/* The last descriptor with block_count <= UINT32_MAX */
	to_be64(&desc->lba, offset_blocks);
	to_be32(&desc->block_count, num_blocks);
	memset(&desc->reserved, 0, sizeof(desc->reserved));

	/* 8-byte header + n * 16-byte block descriptor */
	cmd_len = 8 + (desc - first_desc + 1) * sizeof(struct spdk_scsi_unmap_bdesc);

	req->cdb[0] = SPDK_SBC_UNMAP;
	to_be16(&req->cdb[7], cmd_len);

	/* 8-byte header */
	to_be16(&buf[0], cmd_len - 2); /* total length (excluding the length field) */
	to_be16(&buf[2], cmd_len - 8); /* length of block descriptors */
	memset(&buf[4], 0, 4); /* reserved */

	bdev_virtio_send_io(ch, bdev_io);
}

static void
bdev_virtio_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		       bool success)
{
	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	bdev_virtio_rw(ch, bdev_io);
}

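/* Dispatch a single bdev I/O. READ and UNMAP first obtain a data buffer via
 * spdk_bdev_io_get_buf(); a negative return value makes the caller fail the
 * I/O immediately.
 */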
static int
_bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		return 0;
	case SPDK_BDEV_IO_TYPE_WRITE:
		bdev_virtio_rw(ch, bdev_io);
		return 0;
	case SPDK_BDEV_IO_TYPE_RESET:
		bdev_virtio_reset(ch, bdev_io);
		return 0;
	case SPDK_BDEV_IO_TYPE_UNMAP: {
		uint64_t buf_len = 8 /* header size */ +
				   (bdev_io->u.bdev.num_blocks + UINT32_MAX - 1) /
				   UINT32_MAX * sizeof(struct spdk_scsi_unmap_bdesc);

		if (!disk->info.unmap_supported) {
			return -1;
		}

		if (buf_len > SPDK_BDEV_LARGE_BUF_MAX_SIZE) {
			SPDK_ERRLOG("Trying to UNMAP too many blocks: %"PRIu64"\n",
				    bdev_io->u.bdev.num_blocks);
			return -1;
		}
		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_unmap, buf_len);
		return 0;
	}
	case SPDK_BDEV_IO_TYPE_FLUSH:
	default:
		return -1;
	}
	return 0;
}

static void
bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	if (_bdev_virtio_submit_request(ch, bdev_io) < 0) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static bool
bdev_virtio_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct virtio_scsi_disk *disk = ctx;

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_WRITE:
	case SPDK_BDEV_IO_TYPE_FLUSH:
	case SPDK_BDEV_IO_TYPE_RESET:
		return true;

	case SPDK_BDEV_IO_TYPE_UNMAP:
		return disk->info.unmap_supported;

	default:
		return false;
	}
}

static struct spdk_io_channel *
bdev_virtio_get_io_channel(void *ctx)
{
	struct virtio_scsi_disk *disk = ctx;

	return spdk_get_io_channel(disk->svdev);
}

static int
bdev_virtio_disk_destruct(void *ctx)
{
	struct virtio_scsi_disk *disk = ctx;
	struct virtio_scsi_dev *svdev = disk->svdev;

	TAILQ_REMOVE(&svdev->luns, disk, link);
	free(disk->bdev.name);
	free(disk);

	if (svdev->removed && TAILQ_EMPTY(&svdev->luns)) {
		spdk_io_device_unregister(svdev, virtio_scsi_dev_unregister_cb);
	}

	return 0;
}

static int
bdev_virtio_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
	struct virtio_scsi_disk *disk = ctx;

	virtio_dev_dump_json_info(&disk->svdev->vdev, w);
	return 0;
}

static void
bdev_virtio_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	/* SCSI targets and LUNs are discovered during the scan process, so there
	 * is nothing to save here.
	 */
}

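/* Function table registered with the bdev layer for every discovered LUN. */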
static const struct spdk_bdev_fn_table virtio_fn_table = {
	.destruct = bdev_virtio_disk_destruct,
	.submit_request = bdev_virtio_submit_request,
	.io_type_supported = bdev_virtio_io_type_supported,
	.get_io_channel = bdev_virtio_get_io_channel,
	.dump_info_json = bdev_virtio_dump_info_json,
	.write_config_json = bdev_virtio_write_config_json,
};

static void
get_scsi_status(struct virtio_scsi_cmd_resp *resp, int *sk, int *asc, int *ascq)
{
	/* see spdk_scsi_task_build_sense_data() for sense data details */
	*sk = 0;
	*asc = 0;
	*ascq = 0;

	if (resp->sense_len < 3) {
		return;
	}

	*sk = resp->sense[2] & 0xf;

	if (resp->sense_len < 13) {
		return;
	}

	*asc = resp->sense[12];

	if (resp->sense_len < 14) {
		return;
	}

	*ascq = resp->sense[13];
}

static void
bdev_virtio_io_cpl(struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
	int sk, asc, ascq;

	get_scsi_status(&io_ctx->resp, &sk, &asc, &ascq);
	spdk_bdev_io_complete_scsi_status(bdev_io, io_ctx->resp.status, sk, asc, ascq);
}

static int
bdev_virtio_poll(void *arg)
{
	struct bdev_virtio_io_channel *ch = arg;
	struct virtio_scsi_dev *svdev = ch->svdev;
	struct virtio_scsi_scan_base *scan_ctx = svdev->scan_ctx;
	void *io[32];
	uint32_t io_len[32];
	uint16_t i, cnt;
	int rc;

	cnt = virtio_recv_pkts(ch->vq, (void **)io, io_len, SPDK_COUNTOF(io));
	for (i = 0; i < cnt; ++i) {
		if (spdk_unlikely(scan_ctx && io[i] == &scan_ctx->io_ctx)) {
			if (svdev->removed) {
				_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
				return SPDK_POLLER_BUSY;
			}

			if (scan_ctx->restart) {
				scan_ctx->restart = false;
				scan_ctx->full_scan = true;
				_virtio_scsi_dev_scan_tgt(scan_ctx, 0);
				continue;
			}

			process_scan_resp(scan_ctx);
			continue;
		}

		bdev_virtio_io_cpl(io[i]);
	}

	if (spdk_unlikely(scan_ctx && scan_ctx->needs_resend)) {
		if (svdev->removed) {
			_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
			return SPDK_POLLER_BUSY;
		} else if (cnt == 0) {
			return SPDK_POLLER_IDLE;
		}

		rc = send_scan_io(scan_ctx);
		if (rc != 0) {
			assert(scan_ctx->retries > 0);
			scan_ctx->retries--;
			if (scan_ctx->retries == 0) {
				SPDK_ERRLOG("Target scan failed unrecoverably with rc = %d.\n", rc);
				_virtio_scsi_dev_scan_finish(scan_ctx, rc);
			}
		}
	}

	return cnt;
}

static void
bdev_virtio_tmf_cpl_cb(void *ctx)
{
	struct spdk_bdev_io *bdev_io = ctx;
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;

	if (io_ctx->tmf_resp.response == VIRTIO_SCSI_S_OK) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static void
bdev_virtio_tmf_cpl(struct spdk_bdev_io *bdev_io)
{
	spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io), bdev_virtio_tmf_cpl_cb, bdev_io);
}

static void
bdev_virtio_eventq_io_cpl(struct virtio_scsi_dev *svdev, struct virtio_scsi_eventq_io *io)
{
	struct virtio_scsi_event *ev = &io->ev;
	struct virtio_scsi_disk *disk;

	if (ev->lun[0] != 1) {
		SPDK_WARNLOG("Received an event with invalid data layout.\n");
		goto out;
	}

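	/* The device dropped at least one event - the LUN list may be stale,
	 * so schedule a full rescan before handling this event.
	 */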
	if (ev->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
		ev->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
		virtio_scsi_dev_scan(svdev, NULL, NULL);
	}

	switch (ev->event) {
	case VIRTIO_SCSI_T_NO_EVENT:
		break;
	case VIRTIO_SCSI_T_TRANSPORT_RESET:
		switch (ev->reason) {
		case VIRTIO_SCSI_EVT_RESET_RESCAN:
			virtio_scsi_dev_scan_tgt(svdev, ev->lun[1]);
			break;
		case VIRTIO_SCSI_EVT_RESET_REMOVED:
			disk = virtio_scsi_dev_get_disk_by_id(svdev, ev->lun[1]);
			if (disk != NULL) {
				spdk_bdev_unregister(&disk->bdev, NULL, NULL);
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

out:
	virtio_scsi_dev_send_eventq_io(svdev->vdev.vqs[VIRTIO_SCSI_EVENTQ], io);
}

static void
bdev_virtio_tmf_abort_nomem_cb(void *ctx)
{
	struct spdk_bdev_io *bdev_io = ctx;

	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
}

static void
bdev_virtio_tmf_abort_ioerr_cb(void *ctx)
{
	struct spdk_bdev_io *bdev_io = ctx;

	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
}

static void
bdev_virtio_tmf_abort(struct spdk_bdev_io *bdev_io, int status)
{
	spdk_msg_fn fn;

	if (status == -ENOMEM) {
		fn = bdev_virtio_tmf_abort_nomem_cb;
	} else {
		fn = bdev_virtio_tmf_abort_ioerr_cb;
	}

	spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io), fn, bdev_io);
}

static int
bdev_virtio_send_tmf_io(struct virtqueue *ctrlq, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
	int rc;

	rc = virtqueue_req_start(ctrlq, bdev_io, 2);
	if (rc != 0) {
		return rc;
	}

	virtqueue_req_add_iovs(ctrlq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	virtqueue_req_add_iovs(ctrlq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);

	virtqueue_req_flush(ctrlq);
	return 0;
}

static int
bdev_virtio_mgmt_poll(void *arg)
{
	struct virtio_scsi_dev *svdev = arg;
	struct virtio_dev *vdev = &svdev->vdev;
	struct virtqueue *eventq = vdev->vqs[VIRTIO_SCSI_EVENTQ];
	struct virtqueue *ctrlq = vdev->vqs[VIRTIO_SCSI_CONTROLQ];
	struct spdk_ring *send_ring = svdev->ctrlq_ring;
	void *io[16];
	uint32_t io_len[16];
	uint16_t i, cnt;
	int rc;
	int total = 0;

	cnt = spdk_ring_dequeue(send_ring, io, SPDK_COUNTOF(io));
	total += cnt;
	for (i = 0; i < cnt; ++i) {
		rc = bdev_virtio_send_tmf_io(ctrlq, io[i]);
		if (rc != 0) {
			bdev_virtio_tmf_abort(io[i], rc);
		}
	}

	cnt = virtio_recv_pkts(ctrlq, io, io_len, SPDK_COUNTOF(io));
	total += cnt;
	for (i = 0; i < cnt; ++i) {
		bdev_virtio_tmf_cpl(io[i]);
	}

	cnt = virtio_recv_pkts(eventq, io, io_len, SPDK_COUNTOF(io));
	total += cnt;
	for (i = 0; i < cnt; ++i) {
		bdev_virtio_eventq_io_cpl(svdev, io[i]);
	}

	return total;
}

static int
bdev_virtio_scsi_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct virtio_scsi_dev *svdev = io_device;
	struct virtio_dev *vdev = &svdev->vdev;
	struct bdev_virtio_io_channel *ch = ctx_buf;
	struct virtqueue *vq;
	int32_t queue_idx;

	queue_idx = virtio_dev_find_and_acquire_queue(vdev, VIRTIO_SCSI_REQUESTQ);
	if (queue_idx < 0) {
		SPDK_ERRLOG("Couldn't get an unused queue for the io_channel.\n");
		return -1;
	}

	vq = vdev->vqs[queue_idx];

	ch->svdev = svdev;
	ch->vq = vq;

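	/* Period 0 registers the poller to run on every iteration of the
	 * channel's thread.
	 */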
	ch->poller = SPDK_POLLER_REGISTER(bdev_virtio_poll, ch, 0);

	return 0;
}

static void
bdev_virtio_scsi_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct bdev_virtio_io_channel *ch = ctx_buf;
	struct virtio_scsi_dev *svdev = ch->svdev;
	struct virtio_dev *vdev = &svdev->vdev;
	struct virtqueue *vq = ch->vq;

	spdk_poller_unregister(&ch->poller);
	virtio_dev_release_queue(vdev, vq->vq_queue_index);
}

static void
_virtio_scsi_dev_scan_finish(struct virtio_scsi_scan_base *base, int errnum)
{
	struct virtio_scsi_dev *svdev = base->svdev;
	size_t bdevs_cnt;
	struct spdk_bdev *bdevs[BDEV_VIRTIO_MAX_TARGET];
	struct virtio_scsi_disk *disk;
	struct virtio_scsi_scan_info *tgt, *next_tgt;

	spdk_put_io_channel(spdk_io_channel_from_ctx(base->channel));
	base->svdev->scan_ctx = NULL;

	TAILQ_FOREACH_SAFE(tgt, &base->scan_queue, tailq, next_tgt) {
		TAILQ_REMOVE(&base->scan_queue, tgt, tailq);
		free(tgt);
	}

	if (base->cb_fn == NULL) {
		spdk_free(base);
		return;
	}

	bdevs_cnt = 0;
	if (errnum == 0) {
		TAILQ_FOREACH(disk, &svdev->luns, link) {
			bdevs[bdevs_cnt] = &disk->bdev;
			bdevs_cnt++;
		}
	}

	base->cb_fn(base->cb_arg, errnum, bdevs, bdevs_cnt);
	spdk_free(base);
}

static int
send_scan_io(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_io_ctx *io_ctx = &base->io_ctx;
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtqueue *vq = base->channel->vq;
	int payload_iov_cnt = base->iov.iov_len > 0 ? 1 : 0;
	int rc;

	req->lun[0] = 1;
	req->lun[1] = base->info.target;

	rc = virtqueue_req_start(vq, io_ctx, 2 + payload_iov_cnt);
	if (rc != 0) {
		base->needs_resend = true;
		return -1;
	}

	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
	virtqueue_req_add_iovs(vq, &base->iov, payload_iov_cnt, SPDK_VIRTIO_DESC_WR);

	virtqueue_req_flush(vq);
	return 0;
}

static int
send_inquiry(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct spdk_scsi_cdb_inquiry *cdb;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = BDEV_VIRTIO_SCAN_PAYLOAD_SIZE;
	cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;
	cdb->opcode = SPDK_SPC_INQUIRY;
	to_be16(cdb->alloc_len, BDEV_VIRTIO_SCAN_PAYLOAD_SIZE);

	return send_scan_io(base);
}

static int
send_inquiry_vpd(struct virtio_scsi_scan_base *base, uint8_t page_code)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct spdk_scsi_cdb_inquiry *inquiry_cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = BDEV_VIRTIO_SCAN_PAYLOAD_SIZE;
	inquiry_cdb->opcode = SPDK_SPC_INQUIRY;
	inquiry_cdb->evpd = 1;
	inquiry_cdb->page_code = page_code;
	to_be16(inquiry_cdb->alloc_len, base->iov.iov_len);

	return send_scan_io(base);
}

static int
send_read_cap_10(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = 8;
	req->cdb[0] = SPDK_SBC_READ_CAPACITY_10;

	return send_scan_io(base);
}

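/* READ CAPACITY (16) is issued only when READ CAPACITY (10) reports the
 * maximum LBA as 0xffffffff, i.e. the LUN does not fit in 32-bit addressing.
 */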
static int
send_read_cap_16(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = 32;
	req->cdb[0] = SPDK_SPC_SERVICE_ACTION_IN_16;
	req->cdb[1] = SPDK_SBC_SAI_READ_CAPACITY_16;
	to_be32(&req->cdb[10], base->iov.iov_len);

	return send_scan_io(base);
}

static int
send_test_unit_ready(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));
	req->cdb[0] = SPDK_SPC_TEST_UNIT_READY;
	base->iov.iov_len = 0;

	return send_scan_io(base);
}

static int
send_start_stop_unit(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));
	req->cdb[0] = SPDK_SBC_START_STOP_UNIT;
	req->cdb[4] = SPDK_SBC_START_STOP_UNIT_START_BIT;
	base->iov.iov_len = 0;

	return send_scan_io(base);
}

static int
process_scan_start_stop_unit(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;

	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		return send_inquiry_vpd(base, SPDK_SPC_VPD_SUPPORTED_VPD_PAGES);
	}

	return -1;
}

static int
process_scan_test_unit_ready(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	int sk, asc, ascq;

	get_scsi_status(resp, &sk, &asc, &ascq);

	/* check response, get VPD if spun up otherwise send SSU */
	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		return send_inquiry_vpd(base, SPDK_SPC_VPD_SUPPORTED_VPD_PAGES);
	} else if (resp->response == VIRTIO_SCSI_S_OK &&
		   resp->status == SPDK_SCSI_STATUS_CHECK_CONDITION &&
		   sk == SPDK_SCSI_SENSE_UNIT_ATTENTION &&
		   asc == SPDK_SCSI_ASC_LOGICAL_UNIT_NOT_READY) {
		return send_start_stop_unit(base);
	} else {
		return -1;
	}
}

static int
process_scan_inquiry_standard(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	struct spdk_scsi_cdb_inquiry_data *inquiry_data =
		(struct spdk_scsi_cdb_inquiry_data *)base->payload;

	if (resp->status != SPDK_SCSI_STATUS_GOOD) {
		return -1;
	}

	/* check to make sure it's a supported device */
	if (inquiry_data->peripheral_device_type != SPDK_SPC_PERIPHERAL_DEVICE_TYPE_DISK ||
	    inquiry_data->peripheral_qualifier != SPDK_SPC_PERIPHERAL_QUALIFIER_CONNECTED) {
		SPDK_WARNLOG("Unsupported peripheral device type 0x%02x (qualifier 0x%02x)\n",
			     inquiry_data->peripheral_device_type,
			     inquiry_data->peripheral_qualifier);
		return -1;
	}

	return send_test_unit_ready(base);
}

static int
process_scan_inquiry_vpd_supported_vpd_pages(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	bool block_provisioning_page_supported = false;

	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		const uint8_t *vpd_data = base->payload;
		const uint8_t *supported_vpd_pages = vpd_data + 4;
		uint16_t page_length;
		uint16_t num_supported_pages;
		uint16_t i;

		page_length = from_be16(vpd_data + 2);
		num_supported_pages = spdk_min(page_length, base->iov.iov_len - 4);

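		/* Look for the Block Thin Provisioning VPD page to find out
		 * whether the target advertises UNMAP support.
		 */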
		for (i = 0; i < num_supported_pages; i++) {
			if (supported_vpd_pages[i] == SPDK_SPC_VPD_BLOCK_THIN_PROVISION) {
				block_provisioning_page_supported = true;
				break;
			}
		}
	}

	if (block_provisioning_page_supported) {
		return send_inquiry_vpd(base, SPDK_SPC_VPD_BLOCK_THIN_PROVISION);
	} else {
		return send_read_cap_10(base);
	}
}

static int
process_scan_inquiry_vpd_block_thin_provision(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;

	base->info.unmap_supported = false;

	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		uint8_t *vpd_data = base->payload;

		base->info.unmap_supported = !!(vpd_data[5] & SPDK_SCSI_UNMAP_LBPU);
	}

	SPDK_INFOLOG(virtio, "Target %u: unmap supported = %d\n",
		     base->info.target, (int)base->info.unmap_supported);

	return send_read_cap_10(base);
}

static int
process_scan_inquiry(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct spdk_scsi_cdb_inquiry *inquiry_cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;

	if ((inquiry_cdb->evpd & 1) == 0) {
		return process_scan_inquiry_standard(base);
	}

	switch (inquiry_cdb->page_code) {
	case SPDK_SPC_VPD_SUPPORTED_VPD_PAGES:
		return process_scan_inquiry_vpd_supported_vpd_pages(base);
	case SPDK_SPC_VPD_BLOCK_THIN_PROVISION:
		return process_scan_inquiry_vpd_block_thin_provision(base);
	default:
		SPDK_DEBUGLOG(virtio, "Unexpected VPD page 0x%02x\n", inquiry_cdb->page_code);
		return -1;
	}
}

static void
bdev_virtio_disk_notify_remove(struct virtio_scsi_disk *disk)
{
	disk->removed = true;
	spdk_bdev_close(disk->notify_desc);
}

static void
bdev_virtio_disk_notify_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				 void *event_ctx)
{
	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		bdev_virtio_disk_notify_remove(event_ctx);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

/* To be called only from the thread performing target scan */
static int
virtio_scsi_dev_add_tgt(struct virtio_scsi_dev *svdev, struct virtio_scsi_scan_info *info)
{
	struct virtio_scsi_disk *disk;
	struct spdk_bdev *bdev;
	int rc;

	TAILQ_FOREACH(disk, &svdev->luns, link) {
		if (disk->info.target == info->target) {
			/* Target is already attached and param change is not supported */
			return 0;
		}
	}

	if (info->block_size == 0 || info->num_blocks == 0) {
		SPDK_ERRLOG("%s: invalid target %u: bs=%"PRIu32" blocks=%"PRIu64"\n",
			    svdev->vdev.name, info->target, info->block_size, info->num_blocks);
		return -EINVAL;
	}

	disk = calloc(1, sizeof(*disk));
	if (disk == NULL) {
		SPDK_ERRLOG("could not allocate disk\n");
		return -ENOMEM;
	}

	disk->svdev = svdev;
	memcpy(&disk->info, info, sizeof(*info));

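	/* The bdev is named <controller>t<target>, e.g. VirtioScsi0t3. */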
	bdev = &disk->bdev;
	bdev->name = spdk_sprintf_alloc("%st%"PRIu8, svdev->vdev.name, info->target);
	if (bdev->name == NULL) {
		SPDK_ERRLOG("Couldn't alloc memory for the bdev name.\n");
		free(disk);
		return -ENOMEM;
	}

	bdev->product_name = "Virtio SCSI Disk";
	bdev->write_cache = 0;
	bdev->blocklen = disk->info.block_size;
	bdev->blockcnt = disk->info.num_blocks;

	bdev->ctxt = disk;
	bdev->fn_table = &virtio_fn_table;
	bdev->module = &virtio_scsi_if;

	rc = spdk_bdev_register(&disk->bdev);
	if (rc) {
		SPDK_ERRLOG("Failed to register bdev name=%s\n", disk->bdev.name);
		free(bdev->name);
		free(disk);
		return rc;
	}

	rc = spdk_bdev_open_ext(bdev->name, false, bdev_virtio_disk_notify_event_cb,
				disk, &disk->notify_desc);
	if (rc) {
		assert(false);
	}

	TAILQ_INSERT_TAIL(&svdev->luns, disk, link);
	return 0;
}

static int
process_read_cap_10(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	uint64_t max_block;
	uint32_t block_size;
	uint8_t target_id = req->lun[1];
	int rc;

	if (resp->response != VIRTIO_SCSI_S_OK || resp->status != SPDK_SCSI_STATUS_GOOD) {
		SPDK_ERRLOG("READ CAPACITY (10) failed for target %"PRIu8".\n", target_id);
		return -1;
	}

	block_size = from_be32(base->payload + 4);
	max_block = from_be32(base->payload);

	if (max_block == 0xffffffff) {
		return send_read_cap_16(base);
	}

	base->info.num_blocks = (uint64_t)max_block + 1;
	base->info.block_size = block_size;

	rc = virtio_scsi_dev_add_tgt(base->svdev, &base->info);
	if (rc != 0) {
		return rc;
	}

	return _virtio_scsi_dev_scan_next(base, 0);
}

static int
process_read_cap_16(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	uint8_t target_id = req->lun[1];
	int rc;

	if (resp->response != VIRTIO_SCSI_S_OK || resp->status != SPDK_SCSI_STATUS_GOOD) {
		SPDK_ERRLOG("READ CAPACITY (16) failed for target %"PRIu8".\n", target_id);
		return -1;
	}

	base->info.num_blocks = from_be64(base->payload) + 1;
	base->info.block_size = from_be32(base->payload + 8);
	rc = virtio_scsi_dev_add_tgt(base->svdev, &base->info);
	if (rc != 0) {
		return rc;
	}

	return _virtio_scsi_dev_scan_next(base, 0);
}

static void
process_scan_resp(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	int rc, sk, asc, ascq;
	uint8_t target_id;

	if (base->io_ctx.iov_req.iov_len < sizeof(struct virtio_scsi_cmd_req) ||
	    base->io_ctx.iov_resp.iov_len < sizeof(struct virtio_scsi_cmd_resp)) {
		SPDK_ERRLOG("Received target scan message with invalid length.\n");
		_virtio_scsi_dev_scan_next(base, -EIO);
		return;
	}

	get_scsi_status(resp, &sk, &asc, &ascq);
	target_id = req->lun[1];

	if (resp->response == VIRTIO_SCSI_S_BAD_TARGET ||
	    resp->response == VIRTIO_SCSI_S_INCORRECT_LUN) {
		_virtio_scsi_dev_scan_next(base, -ENODEV);
		return;
	}

	if (resp->response != VIRTIO_SCSI_S_OK ||
	    (resp->status == SPDK_SCSI_STATUS_CHECK_CONDITION &&
	     sk != SPDK_SCSI_SENSE_ILLEGAL_REQUEST)) {
		assert(base->retries > 0);
		base->retries--;
		if (base->retries == 0) {
			SPDK_NOTICELOG("Target %"PRIu8" is present, but unavailable.\n", target_id);
			SPDK_LOGDUMP(virtio, "CDB", req->cdb, sizeof(req->cdb));
			SPDK_LOGDUMP(virtio, "SENSE DATA", resp->sense, sizeof(resp->sense));
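			/* Out of retries - skip this target and continue with
			 * the remaining ones.
			 */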
			_virtio_scsi_dev_scan_next(base, -EBUSY);
			return;
		}

		/* resend the same request */
		rc = send_scan_io(base);
		if (rc != 0) {
			/* Let response poller do the resend */
		}
		return;
	}

	base->retries = SCAN_REQUEST_RETRIES;

	switch (req->cdb[0]) {
	case SPDK_SPC_INQUIRY:
		rc = process_scan_inquiry(base);
		break;
	case SPDK_SPC_TEST_UNIT_READY:
		rc = process_scan_test_unit_ready(base);
		break;
	case SPDK_SBC_START_STOP_UNIT:
		rc = process_scan_start_stop_unit(base);
		break;
	case SPDK_SBC_READ_CAPACITY_10:
		rc = process_read_cap_10(base);
		break;
	case SPDK_SPC_SERVICE_ACTION_IN_16:
		rc = process_read_cap_16(base);
		break;
	default:
		SPDK_ERRLOG("Received invalid target scan message: cdb[0] = %"PRIu8".\n", req->cdb[0]);
		rc = -1;
		break;
	}

	if (rc != 0) {
		if (base->needs_resend) {
			return; /* Let response poller do the resend */
		}

		_virtio_scsi_dev_scan_next(base, rc);
	}
}

static int
_virtio_scsi_dev_scan_next(struct virtio_scsi_scan_base *base, int rc)
{
	struct virtio_scsi_scan_info *next;
	struct virtio_scsi_disk *disk;
	uint8_t target_id;

	if (base->full_scan) {
		if (rc != 0) {
			disk = virtio_scsi_dev_get_disk_by_id(base->svdev,
							      base->info.target);
			if (disk != NULL) {
				spdk_bdev_unregister(&disk->bdev, NULL, NULL);
			}
		}

		target_id = base->info.target + 1;
		if (target_id < BDEV_VIRTIO_MAX_TARGET) {
			_virtio_scsi_dev_scan_tgt(base, target_id);
			return 0;
		}

		base->full_scan = false;
	}

	next = TAILQ_FIRST(&base->scan_queue);
	if (next == NULL) {
		_virtio_scsi_dev_scan_finish(base, 0);
		return 0;
	}

	TAILQ_REMOVE(&base->scan_queue, next, tailq);
	target_id = next->target;
	free(next);

	_virtio_scsi_dev_scan_tgt(base, target_id);
	return 0;
}

static int
_virtio_scsi_dev_scan_init(struct virtio_scsi_dev *svdev)
{
	struct virtio_scsi_scan_base *base;
	struct spdk_io_channel *io_ch;
	struct virtio_scsi_io_ctx *io_ctx;
	struct virtio_scsi_cmd_req *req;
	struct virtio_scsi_cmd_resp *resp;

	io_ch = spdk_get_io_channel(svdev);
	if (io_ch == NULL) {
		return -EBUSY;
	}

	base = spdk_zmalloc(sizeof(*base), 64, NULL,
			    SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (base == NULL) {
		SPDK_ERRLOG("couldn't allocate memory for scsi target scan.\n");
		return -ENOMEM;
	}

	base->svdev = svdev;

	base->channel = spdk_io_channel_get_ctx(io_ch);
	TAILQ_INIT(&base->scan_queue);
	svdev->scan_ctx = base;

	base->iov.iov_base = base->payload;
	io_ctx = &base->io_ctx;
	req = &io_ctx->req;
	resp = &io_ctx->resp;
	io_ctx->iov_req.iov_base = req;
	io_ctx->iov_req.iov_len = sizeof(*req);
	io_ctx->iov_resp.iov_base = resp;
	io_ctx->iov_resp.iov_len = sizeof(*resp);

	base->retries = SCAN_REQUEST_RETRIES;
	return 0;
}

static void
_virtio_scsi_dev_scan_tgt(struct virtio_scsi_scan_base *base, uint8_t target)
{
	int rc;

	memset(&base->info, 0, sizeof(base->info));
	base->info.target = target;

	rc = send_inquiry(base);
	if (rc) {
		/* Let response poller do the resend */
	}
}

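/* Scan all BDEV_VIRTIO_MAX_TARGET targets. If a scan is already in progress,
 * either fail with -EEXIST (full scan pending) or request a restart once the
 * in-flight scan I/O completes.
 */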
static int
virtio_scsi_dev_scan(struct virtio_scsi_dev *svdev, bdev_virtio_create_cb cb_fn,
		     void *cb_arg)
{
	struct virtio_scsi_scan_base *base;
	struct virtio_scsi_scan_info *tgt, *next_tgt;
	int rc;

	if (svdev->scan_ctx) {
		if (svdev->scan_ctx->full_scan) {
			return -EEXIST;
		}

		/* We're about to start a full rescan, so there's no need
		 * to scan particular targets afterwards.
		 */
		TAILQ_FOREACH_SAFE(tgt, &svdev->scan_ctx->scan_queue, tailq, next_tgt) {
			TAILQ_REMOVE(&svdev->scan_ctx->scan_queue, tgt, tailq);
			free(tgt);
		}

		svdev->scan_ctx->cb_fn = cb_fn;
		svdev->scan_ctx->cb_arg = cb_arg;
		svdev->scan_ctx->restart = true;
		return 0;
	}

	rc = _virtio_scsi_dev_scan_init(svdev);
	if (rc != 0) {
		return rc;
	}

	base = svdev->scan_ctx;
	base->cb_fn = cb_fn;
	base->cb_arg = cb_arg;
	base->full_scan = true;

	_virtio_scsi_dev_scan_tgt(base, 0);
	return 0;
}

static int
virtio_scsi_dev_scan_tgt(struct virtio_scsi_dev *svdev, uint8_t target)
{
	struct virtio_scsi_scan_base *base;
	struct virtio_scsi_scan_info *info;
	int rc;

	base = svdev->scan_ctx;
	if (base) {
		info = calloc(1, sizeof(*info));
		if (info == NULL) {
			SPDK_ERRLOG("calloc failed\n");
			return -ENOMEM;
		}

		info->target = target;
		TAILQ_INSERT_TAIL(&base->scan_queue, info, tailq);
		return 0;
	}

	rc = _virtio_scsi_dev_scan_init(svdev);
	if (rc != 0) {
		return rc;
	}

	base = svdev->scan_ctx;
	base->full_scan = true;
	_virtio_scsi_dev_scan_tgt(base, target);
	return 0;
}

static int
bdev_virtio_initialize(void)
{
	return 0;
}

static void
_virtio_scsi_dev_unregister_cb(void *io_device)
{
	struct virtio_scsi_dev *svdev = io_device;
	struct virtio_dev *vdev = &svdev->vdev;
	bool finish_module;
	bdev_virtio_remove_cb remove_cb;
	void *remove_ctx;

	assert(spdk_ring_count(svdev->ctrlq_ring) == 0);
	spdk_ring_free(svdev->ctrlq_ring);
	spdk_poller_unregister(&svdev->mgmt_poller);

	virtio_dev_release_queue(vdev, VIRTIO_SCSI_EVENTQ);
	virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);

	virtio_dev_stop(vdev);
	virtio_dev_destruct(vdev);

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_REMOVE(&g_virtio_scsi_devs, svdev, tailq);
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	remove_cb = svdev->remove_cb;
	remove_ctx = svdev->remove_ctx;
	spdk_free(svdev->eventq_ios);
	free(svdev);

	if (remove_cb) {
		remove_cb(remove_ctx, 0);
	}

	finish_module = TAILQ_EMPTY(&g_virtio_scsi_devs);

	if (g_bdev_virtio_finish && finish_module) {
		spdk_bdev_module_finish_done();
	}
}

static void
virtio_scsi_dev_unregister_cb(void *io_device)
{
	struct virtio_scsi_dev *svdev = io_device;
	struct spdk_thread *thread;

	thread = virtio_dev_queue_get_thread(&svdev->vdev, VIRTIO_SCSI_CONTROLQ);
	spdk_thread_send_msg(thread, _virtio_scsi_dev_unregister_cb, io_device);
}

static void
virtio_scsi_dev_remove(struct virtio_scsi_dev *svdev,
		       bdev_virtio_remove_cb cb_fn, void *cb_arg)
{
	struct virtio_scsi_disk *disk, *disk_tmp;
	bool do_remove = true;

	if (svdev->removed) {
		if (cb_fn) {
			cb_fn(cb_arg, -EBUSY);
		}
		return;
	}

	svdev->remove_cb = cb_fn;
	svdev->remove_ctx = cb_arg;
	svdev->removed = true;

	if (svdev->scan_ctx) {
		/* The removal will continue after we receive a pending scan I/O. */
		return;
	}

	TAILQ_FOREACH_SAFE(disk, &svdev->luns, link, disk_tmp) {
		if (!disk->removed) {
			spdk_bdev_unregister(&disk->bdev, NULL, NULL);
		}
		do_remove = false;
	}

	if (do_remove) {
		spdk_io_device_unregister(svdev, virtio_scsi_dev_unregister_cb);
	}
}

static void
bdev_virtio_finish(void)
{
	struct virtio_scsi_dev *svdev, *next;

	g_bdev_virtio_finish = true;

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	if (TAILQ_EMPTY(&g_virtio_scsi_devs)) {
		pthread_mutex_unlock(&g_virtio_scsi_mutex);
		spdk_bdev_module_finish_done();
		return;
	}

	/* Defer module finish until all controllers are removed. */
	TAILQ_FOREACH_SAFE(svdev, &g_virtio_scsi_devs, tailq, next) {
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}
	pthread_mutex_unlock(&g_virtio_scsi_mutex);
}

int
bdev_virtio_user_scsi_dev_create(const char *base_name, const char *path,
				 unsigned num_queues, unsigned queue_size,
				 bdev_virtio_create_cb cb_fn, void *cb_arg)
{
	struct virtio_scsi_dev *svdev;
	int rc;

	svdev = virtio_user_scsi_dev_create(base_name, path, num_queues, queue_size);
	if (svdev == NULL) {
		return -1;
	}

	rc = virtio_scsi_dev_scan(svdev, cb_fn, cb_arg);
	if (rc) {
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}

	return rc;
}

struct bdev_virtio_pci_dev_create_ctx {
	const char *name;
	bdev_virtio_create_cb cb_fn;
	void *cb_arg;
};

static int
bdev_virtio_pci_scsi_dev_create_cb(struct virtio_pci_ctx *pci_ctx, void *ctx)
{
	struct virtio_scsi_dev *svdev;
	struct bdev_virtio_pci_dev_create_ctx *create_ctx = ctx;
	int rc;

	svdev = virtio_pci_scsi_dev_create(create_ctx->name, pci_ctx);
	if (svdev == NULL) {
		return -1;
	}

	rc = virtio_scsi_dev_scan(svdev, create_ctx->cb_fn, create_ctx->cb_arg);
	if (rc) {
		svdev->vdev.ctx = NULL;
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}

	return rc;
}

int
bdev_virtio_pci_scsi_dev_create(const char *name, struct spdk_pci_addr *pci_addr,
				bdev_virtio_create_cb cb_fn, void *cb_arg)
{
	struct bdev_virtio_pci_dev_create_ctx create_ctx;

	create_ctx.name = name;
	create_ctx.cb_fn = cb_fn;
	create_ctx.cb_arg = cb_arg;

	return virtio_pci_dev_attach(bdev_virtio_pci_scsi_dev_create_cb, &create_ctx,
				     VIRTIO_ID_SCSI, pci_addr);
}

int
bdev_virtio_scsi_dev_remove(const char *name, bdev_virtio_remove_cb cb_fn, void *cb_arg)
{
	struct virtio_scsi_dev *svdev;

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		if (strcmp(svdev->vdev.name, name) == 0) {
			break;
		}
	}

	if (svdev == NULL) {
		pthread_mutex_unlock(&g_virtio_scsi_mutex);
		SPDK_ERRLOG("Cannot find Virtio-SCSI device named '%s'\n", name);
		return -ENODEV;
	}

	virtio_scsi_dev_remove(svdev, cb_fn, cb_arg);
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	return 0;
}

void
bdev_virtio_scsi_dev_list(struct spdk_json_write_ctx *w)
{
	struct virtio_scsi_dev *svdev;

	spdk_json_write_array_begin(w);

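	/* One object per attached controller, including transport-specific
	 * details provided by the virtio layer.
	 */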
	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		spdk_json_write_object_begin(w);

		spdk_json_write_named_string(w, "name", svdev->vdev.name);

		virtio_dev_dump_json_info(&svdev->vdev, w);

		spdk_json_write_object_end(w);
	}
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	spdk_json_write_array_end(w);
}

SPDK_LOG_REGISTER_COMPONENT(virtio)