/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation.
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/bdev.h"
#include "spdk/endian.h"
#include "spdk/env.h"
#include "spdk/thread.h"
#include "spdk/scsi_spec.h"
#include "spdk/string.h"
#include "spdk/util.h"
#include "spdk/json.h"

#include "spdk/bdev_module.h"
#include "spdk/log.h"
#include "spdk_internal/virtio.h"
#include "spdk_internal/vhost_user.h"

#include <linux/virtio_scsi.h>
#include <linux/virtio_ids.h>

#include "bdev_virtio.h"

#define BDEV_VIRTIO_MAX_TARGET 64
#define BDEV_VIRTIO_SCAN_PAYLOAD_SIZE 256
#define MGMT_POLL_PERIOD_US (1000 * 5)
#define CTRLQ_RING_SIZE 16
#define SCAN_REQUEST_RETRIES 5

/* Number of non-request queues - eventq and controlq */
#define SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED 2

#define VIRTIO_SCSI_EVENTQ_BUFFER_COUNT 16

#define VIRTIO_SCSI_CONTROLQ	0
#define VIRTIO_SCSI_EVENTQ	1
#define VIRTIO_SCSI_REQUESTQ	2

static int bdev_virtio_initialize(void);
static void bdev_virtio_finish(void);

struct virtio_scsi_dev {
	/* Generic virtio device data. */
	struct virtio_dev vdev;

	/** Detected SCSI LUNs */
	TAILQ_HEAD(, virtio_scsi_disk) luns;

	/** Context for the SCSI target scan. */
	struct virtio_scsi_scan_base *scan_ctx;

	/** Controlq poller. */
	struct spdk_poller *mgmt_poller;

	/** Controlq messages to be sent. */
	struct spdk_ring *ctrlq_ring;

	/** Buffers for the eventq. */
	struct virtio_scsi_eventq_io *eventq_ios;

	/** Device marked for removal. */
	bool removed;

	/** Callback to be called after vdev removal. */
	bdev_virtio_remove_cb remove_cb;

	/** Context for the `remove_cb`. */
	void *remove_ctx;

	TAILQ_ENTRY(virtio_scsi_dev) tailq;
};

struct virtio_scsi_io_ctx {
	struct iovec iov_req;
	struct iovec iov_resp;
	union {
		struct virtio_scsi_cmd_req req;
		struct virtio_scsi_ctrl_tmf_req tmf_req;
	};
	union {
		struct virtio_scsi_cmd_resp resp;
		struct virtio_scsi_ctrl_tmf_resp tmf_resp;
	};
};

struct virtio_scsi_eventq_io {
	struct iovec iov;
	struct virtio_scsi_event ev;
};

struct virtio_scsi_scan_info {
	uint64_t num_blocks;
	uint32_t block_size;
	uint8_t target;
	bool unmap_supported;
	TAILQ_ENTRY(virtio_scsi_scan_info) tailq;
};

struct virtio_scsi_scan_base {
	struct virtio_scsi_dev *svdev;

	/** I/O channel used for the scan I/O. */
	struct bdev_virtio_io_channel *channel;

	bdev_virtio_create_cb cb_fn;
	void *cb_arg;

	/** Scan all targets on the device. */
	bool full_scan;

	/** Start a full rescan after receiving next scan I/O response. */
	bool restart;

	/** Additional targets to be (re)scanned. */
	TAILQ_HEAD(, virtio_scsi_scan_info) scan_queue;

	/** Remaining attempts for sending the current request. */
	unsigned retries;

	/** If set, the last scan I/O needs to be resent */
	bool needs_resend;

	struct virtio_scsi_io_ctx io_ctx;
	struct iovec iov;
	uint8_t payload[BDEV_VIRTIO_SCAN_PAYLOAD_SIZE];

	/** Scan results for the current target. */
	struct virtio_scsi_scan_info info;
};

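/*
 * Reader note (summary of the scan flow driven by virtio_scsi_scan_base):
 * each target is probed with INQUIRY, then TEST UNIT READY (optionally
 * followed by START STOP UNIT for units that are not spun up), then the
 * SUPPORTED VPD PAGES and BLOCK THIN PROVISIONING VPD pages to detect
 * UNMAP support, and finally READ CAPACITY (10)/(16). A successful probe
 * ends with virtio_scsi_dev_add_tgt() registering a bdev for the target.
 */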
struct virtio_scsi_disk {
	struct spdk_bdev bdev;
	struct virtio_scsi_dev *svdev;
	struct virtio_scsi_scan_info info;

	/** Descriptor opened just to be notified of external bdev hotremove. */
	struct spdk_bdev_desc *notify_desc;

	/** Disk marked for removal. */
	bool removed;
	TAILQ_ENTRY(virtio_scsi_disk) link;
};

struct bdev_virtio_io_channel {
	struct virtio_scsi_dev *svdev;

	/** Virtqueue exclusively assigned to this channel. */
	struct virtqueue *vq;

	/** Virtio response poller. */
	struct spdk_poller *poller;
};

static TAILQ_HEAD(, virtio_scsi_dev) g_virtio_scsi_devs =
	TAILQ_HEAD_INITIALIZER(g_virtio_scsi_devs);

static pthread_mutex_t g_virtio_scsi_mutex = PTHREAD_MUTEX_INITIALIZER;

/** Module finish in progress */
static bool g_bdev_virtio_finish = false;

/* Features desired/implemented by this driver. */
#define VIRTIO_SCSI_DEV_SUPPORTED_FEATURES \
	(1ULL << VIRTIO_SCSI_F_INOUT | \
	 1ULL << VIRTIO_SCSI_F_HOTPLUG | \
	 1ULL << VIRTIO_RING_F_EVENT_IDX)

static void virtio_scsi_dev_unregister_cb(void *io_device);
static void virtio_scsi_dev_remove(struct virtio_scsi_dev *svdev,
				   bdev_virtio_remove_cb cb_fn, void *cb_arg);
static int bdev_virtio_scsi_ch_create_cb(void *io_device, void *ctx_buf);
static void bdev_virtio_scsi_ch_destroy_cb(void *io_device, void *ctx_buf);
static void process_scan_resp(struct virtio_scsi_scan_base *base);
static int bdev_virtio_mgmt_poll(void *arg);

static int
virtio_scsi_dev_send_eventq_io(struct virtqueue *vq, struct virtio_scsi_eventq_io *io)
{
	int rc;

	rc = virtqueue_req_start(vq, io, 1);
	if (rc != 0) {
		return -1;
	}

	virtqueue_req_add_iovs(vq, &io->iov, 1, SPDK_VIRTIO_DESC_WR);
	virtqueue_req_flush(vq);

	return 0;
}

static int
virtio_scsi_dev_init(struct virtio_scsi_dev *svdev, uint16_t max_queues, uint64_t feature_bits)
{
	struct virtio_dev *vdev = &svdev->vdev;
	struct spdk_ring *ctrlq_ring;
	struct virtio_scsi_eventq_io *eventq_io;
	struct virtqueue *eventq;
	uint16_t i, num_events;
	int rc;

	rc = virtio_dev_reset(vdev, feature_bits);
	if (rc != 0) {
		return rc;
	}

	rc = virtio_dev_start(vdev, max_queues, SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED);
	if (rc != 0) {
		return rc;
	}

	ctrlq_ring = spdk_ring_create(SPDK_RING_TYPE_MP_SC, CTRLQ_RING_SIZE,
				      SPDK_ENV_NUMA_ID_ANY);
	if (ctrlq_ring == NULL) {
		SPDK_ERRLOG("Failed to allocate send ring for the controlq.\n");
		return -1;
	}

	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_CONTROLQ);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to acquire the controlq.\n");
		spdk_ring_free(ctrlq_ring);
		return -1;
	}

	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_EVENTQ);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to acquire the eventq.\n");
		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
		spdk_ring_free(ctrlq_ring);
		return -1;
	}

	eventq = vdev->vqs[VIRTIO_SCSI_EVENTQ];
	num_events = spdk_min(eventq->vq_nentries, VIRTIO_SCSI_EVENTQ_BUFFER_COUNT);
	svdev->eventq_ios = spdk_zmalloc(sizeof(*svdev->eventq_ios) * num_events,
					 0, NULL, SPDK_ENV_LCORE_ID_ANY,
					 SPDK_MALLOC_DMA);
	if (svdev->eventq_ios == NULL) {
		SPDK_ERRLOG("cannot allocate memory for %"PRIu16" eventq buffers\n",
			    num_events);
		virtio_dev_release_queue(vdev, VIRTIO_SCSI_EVENTQ);
		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
		spdk_ring_free(ctrlq_ring);
		return -1;
	}

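	/*
	 * Pre-post device-writable buffers on the eventq so the device can
	 * report hotplug/rescan events; completed buffers are consumed by
	 * bdev_virtio_mgmt_poll() and immediately re-posted.
	 */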
	for (i = 0; i < num_events; i++) {
		eventq_io = &svdev->eventq_ios[i];
		eventq_io->iov.iov_base = &eventq_io->ev;
		eventq_io->iov.iov_len = sizeof(eventq_io->ev);
		virtio_scsi_dev_send_eventq_io(eventq, eventq_io);
	}

	svdev->ctrlq_ring = ctrlq_ring;

	svdev->mgmt_poller = SPDK_POLLER_REGISTER(bdev_virtio_mgmt_poll, svdev,
			     MGMT_POLL_PERIOD_US);

	TAILQ_INIT(&svdev->luns);
	svdev->scan_ctx = NULL;
	svdev->removed = false;
	svdev->remove_cb = NULL;
	svdev->remove_ctx = NULL;

	spdk_io_device_register(svdev, bdev_virtio_scsi_ch_create_cb,
				bdev_virtio_scsi_ch_destroy_cb,
				sizeof(struct bdev_virtio_io_channel),
				svdev->vdev.name);

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_INSERT_TAIL(&g_virtio_scsi_devs, svdev, tailq);
	pthread_mutex_unlock(&g_virtio_scsi_mutex);
	return 0;
}

static struct virtio_scsi_dev *
virtio_pci_scsi_dev_create(const char *name, struct virtio_pci_ctx *pci_ctx)
{
	static int pci_dev_counter = 0;
	struct virtio_scsi_dev *svdev;
	struct virtio_dev *vdev;
	char *default_name = NULL;
	uint32_t num_queues;
	int rc;

	svdev = calloc(1, sizeof(*svdev));
	if (svdev == NULL) {
		SPDK_ERRLOG("virtio device calloc failed\n");
		return NULL;
	}

	vdev = &svdev->vdev;
	if (name == NULL) {
		default_name = spdk_sprintf_alloc("VirtioScsi%"PRIu32, pci_dev_counter++);
		if (default_name == NULL) {
			free(svdev);
			return NULL;
		}
		name = default_name;
	}

	rc = virtio_pci_dev_init(vdev, name, pci_ctx);
	free(default_name);

	if (rc != 0) {
		free(svdev);
		return NULL;
	}

	rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_scsi_config, num_queues),
					&num_queues, sizeof(num_queues));
	if (rc) {
		SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc));
		goto fail;
	}

	rc = virtio_scsi_dev_init(svdev, num_queues, VIRTIO_SCSI_DEV_SUPPORTED_FEATURES);
	if (rc != 0) {
		goto fail;
	}

	return svdev;

fail:
	vdev->ctx = NULL;
	virtio_dev_destruct(vdev);
	free(svdev);
	return NULL;
}

static struct virtio_scsi_dev *
virtio_user_scsi_dev_create(const char *name, const char *path,
			    uint16_t num_queues, uint32_t queue_size)
{
	struct virtio_scsi_dev *svdev;
	struct virtio_dev *vdev;
	uint64_t feature_bits;
	int rc;

	svdev = calloc(1, sizeof(*svdev));
	if (svdev == NULL) {
		SPDK_ERRLOG("calloc failed for virtio device %s: %s\n", name, path);
		return NULL;
	}

	vdev = &svdev->vdev;
	rc = virtio_user_dev_init(vdev, name, path, queue_size);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create virtio device %s: %s\n", name, path);
		free(svdev);
		return NULL;
	}

	feature_bits = VIRTIO_SCSI_DEV_SUPPORTED_FEATURES;
	feature_bits |= (1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
	rc = virtio_scsi_dev_init(svdev, num_queues + SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED, feature_bits);
	if (rc != 0) {
		virtio_dev_destruct(vdev);
		free(svdev);
		return NULL;
	}

	return svdev;
}

static struct virtio_scsi_disk *
virtio_scsi_dev_get_disk_by_id(struct virtio_scsi_dev *svdev, uint8_t target_id)
{
	struct virtio_scsi_disk *disk;

	TAILQ_FOREACH(disk, &svdev->luns, link) {
		if (disk->info.target == target_id) {
			return disk;
		}
	}

	return NULL;
}

static int virtio_scsi_dev_scan(struct virtio_scsi_dev *svdev,
				bdev_virtio_create_cb cb_fn, void *cb_arg);
static int send_scan_io(struct virtio_scsi_scan_base *base);
static void _virtio_scsi_dev_scan_tgt(struct virtio_scsi_scan_base *base, uint8_t target);
static int _virtio_scsi_dev_scan_next(struct virtio_scsi_scan_base *base, int rc);
static void _virtio_scsi_dev_scan_finish(struct virtio_scsi_scan_base *base, int errnum);
static int virtio_scsi_dev_scan_tgt(struct virtio_scsi_dev *svdev, uint8_t target);

static int
bdev_virtio_get_ctx_size(void)
{
	return sizeof(struct virtio_scsi_io_ctx);
}

static int
bdev_virtio_scsi_config_json(struct spdk_json_write_ctx *w)
{
	struct virtio_scsi_dev *svdev;

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		spdk_json_write_object_begin(w);

		spdk_json_write_named_string(w, "method", "bdev_virtio_attach_controller");

		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "name", svdev->vdev.name);
		spdk_json_write_named_string(w, "dev_type", "scsi");

		/* Write transport specific parameters. */
		svdev->vdev.backend_ops->write_json_config(&svdev->vdev, w);

		spdk_json_write_object_end(w);

		spdk_json_write_object_end(w);
	}
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	return 0;
}

static struct spdk_bdev_module virtio_scsi_if = {
	.name = "virtio_scsi",
	.module_init = bdev_virtio_initialize,
	.module_fini = bdev_virtio_finish,
	.get_ctx_size = bdev_virtio_get_ctx_size,
	.config_json = bdev_virtio_scsi_config_json,
	.async_fini = true,
};

SPDK_BDEV_MODULE_REGISTER(virtio_scsi, &virtio_scsi_if)

static struct virtio_scsi_io_ctx *
bdev_virtio_init_io_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_cmd_req *req;
	struct virtio_scsi_cmd_resp *resp;
	struct virtio_scsi_disk *disk = (struct virtio_scsi_disk *)bdev_io->bdev;
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;

	req = &io_ctx->req;
	resp = &io_ctx->resp;

	io_ctx->iov_req.iov_base = req;
	io_ctx->iov_req.iov_len = sizeof(*req);

	io_ctx->iov_resp.iov_base = resp;
	io_ctx->iov_resp.iov_len = sizeof(*resp);

	memset(req, 0, sizeof(*req));
	req->lun[0] = 1;
	req->lun[1] = disk->info.target;

	return io_ctx;
}

static struct virtio_scsi_io_ctx *
bdev_virtio_init_tmf_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;

	tmf_req = &io_ctx->tmf_req;
	tmf_resp = &io_ctx->tmf_resp;

	io_ctx->iov_req.iov_base = tmf_req;
	io_ctx->iov_req.iov_len = sizeof(*tmf_req);
	io_ctx->iov_resp.iov_base = tmf_resp;
	io_ctx->iov_resp.iov_len = sizeof(*tmf_resp);

	memset(tmf_req, 0, sizeof(*tmf_req));
	tmf_req->lun[0] = 1;
	tmf_req->lun[1] = disk->info.target;

	return io_ctx;
}

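/*
 * Note on the request layout used below: a virtio-scsi request is a
 * descriptor chain of the command header (device-readable), the data
 * buffers, and the response (device-writable). For reads the data buffers
 * follow the response as device-writable descriptors; for writes they
 * precede it as device-readable ones. The LUN field uses the single-level
 * format from the virtio-scsi spec: lun[0] = 1 and lun[1] = target id
 * (see bdev_virtio_init_io_vreq() above).
 */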
static void
bdev_virtio_send_io(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_virtio_io_channel *virtio_channel = spdk_io_channel_get_ctx(ch);
	struct virtqueue *vq = virtio_channel->vq;
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
	int rc;

	rc = virtqueue_req_start(vq, bdev_io, bdev_io->u.bdev.iovcnt + 2);
	if (rc == -ENOMEM) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
		return;
	} else if (rc != 0) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				       SPDK_VIRTIO_DESC_WR);
	} else {
		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				       SPDK_VIRTIO_DESC_RO);
		virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
	}

	virtqueue_req_flush(vq);
}

static void
bdev_virtio_rw(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_io_vreq(ch, bdev_io);
	struct virtio_scsi_cmd_req *req = &io_ctx->req;
	bool is_write = bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE;

	if (disk->info.num_blocks > (1ULL << 32)) {
		req->cdb[0] = is_write ? SPDK_SBC_WRITE_16 : SPDK_SBC_READ_16;
		to_be64(&req->cdb[2], bdev_io->u.bdev.offset_blocks);
		to_be32(&req->cdb[10], bdev_io->u.bdev.num_blocks);
	} else {
		req->cdb[0] = is_write ? SPDK_SBC_WRITE_10 : SPDK_SBC_READ_10;
		to_be32(&req->cdb[2], bdev_io->u.bdev.offset_blocks);
		to_be16(&req->cdb[7], bdev_io->u.bdev.num_blocks);
	}

	bdev_virtio_send_io(ch, bdev_io);
}

static void
bdev_virtio_reset(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_virtio_io_channel *virtio_ch = spdk_io_channel_get_ctx(ch);
	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_tmf_vreq(ch, bdev_io);
	struct virtio_scsi_ctrl_tmf_req *tmf_req = &io_ctx->tmf_req;
	struct virtio_scsi_dev *svdev = virtio_ch->svdev;
	size_t enqueued_count;

	tmf_req->type = VIRTIO_SCSI_T_TMF;
	tmf_req->subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;

	enqueued_count = spdk_ring_enqueue(svdev->ctrlq_ring, (void **)&bdev_io, 1, NULL);
	if (spdk_likely(enqueued_count == 1)) {
		return;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

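/*
 * Note: the UNMAP payload built below follows the SBC parameter-list
 * layout: an 8-byte header (total data length, block-descriptor data
 * length, 4 reserved bytes) followed by 16-byte block descriptors, each
 * holding a starting LBA, a 32-bit block count and 4 reserved bytes.
 * Ranges longer than UINT32_MAX blocks are split across descriptors.
 */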
static void
bdev_virtio_unmap(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
{
	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_io_vreq(ch, bdev_io);
	struct virtio_scsi_cmd_req *req = &io_ctx->req;
	struct spdk_scsi_unmap_bdesc *desc, *first_desc;
	uint8_t *buf;
	uint64_t offset_blocks, num_blocks;
	uint16_t cmd_len;

	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	buf = bdev_io->u.bdev.iovs[0].iov_base;

	offset_blocks = bdev_io->u.bdev.offset_blocks;
	num_blocks = bdev_io->u.bdev.num_blocks;

	/* (n-1) * 16-byte descriptors */
	first_desc = desc = (struct spdk_scsi_unmap_bdesc *)&buf[8];
	while (num_blocks > UINT32_MAX) {
		to_be64(&desc->lba, offset_blocks);
		to_be32(&desc->block_count, UINT32_MAX);
		memset(&desc->reserved, 0, sizeof(desc->reserved));
		offset_blocks += UINT32_MAX;
		num_blocks -= UINT32_MAX;
		desc++;
	}

	/* The last descriptor with block_count <= UINT32_MAX */
	to_be64(&desc->lba, offset_blocks);
	to_be32(&desc->block_count, num_blocks);
	memset(&desc->reserved, 0, sizeof(desc->reserved));

	/* 8-byte header + n * 16-byte block descriptor */
	cmd_len = 8 + (desc - first_desc + 1) * sizeof(struct spdk_scsi_unmap_bdesc);

	req->cdb[0] = SPDK_SBC_UNMAP;
	to_be16(&req->cdb[7], cmd_len);

	/* 8-byte header */
	to_be16(&buf[0], cmd_len - 2); /* total length (excluding the length field) */
	to_be16(&buf[2], cmd_len - 8); /* length of block descriptors */
	memset(&buf[4], 0, 4); /* reserved */

	bdev_virtio_send_io(ch, bdev_io);
}

static void
bdev_virtio_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		       bool success)
{
	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	bdev_virtio_rw(ch, bdev_io);
}

static int
_bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		return 0;
	case SPDK_BDEV_IO_TYPE_WRITE:
		bdev_virtio_rw(ch, bdev_io);
		return 0;
	case SPDK_BDEV_IO_TYPE_RESET:
		bdev_virtio_reset(ch, bdev_io);
		return 0;
	case SPDK_BDEV_IO_TYPE_UNMAP: {
		uint64_t buf_len = 8 /* header size */ +
				   (bdev_io->u.bdev.num_blocks + UINT32_MAX - 1) /
				   UINT32_MAX * sizeof(struct spdk_scsi_unmap_bdesc);

		if (!disk->info.unmap_supported) {
			return -1;
		}

		if (buf_len > SPDK_BDEV_LARGE_BUF_MAX_SIZE) {
			SPDK_ERRLOG("Trying to UNMAP too many blocks: %"PRIu64"\n",
				    bdev_io->u.bdev.num_blocks);
			return -1;
		}
		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_unmap, buf_len);
		return 0;
	}
	case SPDK_BDEV_IO_TYPE_FLUSH:
	default:
		return -1;
	}
	return 0;
}

static void
bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	if (_bdev_virtio_submit_request(ch, bdev_io) < 0) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static bool
bdev_virtio_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct virtio_scsi_disk *disk = ctx;

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_WRITE:
	case SPDK_BDEV_IO_TYPE_FLUSH:
	case SPDK_BDEV_IO_TYPE_RESET:
		return true;

	case SPDK_BDEV_IO_TYPE_UNMAP:
		return disk->info.unmap_supported;

	default:
		return false;
	}
}

static struct spdk_io_channel *
bdev_virtio_get_io_channel(void *ctx)
{
	struct virtio_scsi_disk *disk = ctx;

	return spdk_get_io_channel(disk->svdev);
}

static int
bdev_virtio_disk_destruct(void *ctx)
{
	struct virtio_scsi_disk *disk = ctx;
	struct virtio_scsi_dev *svdev = disk->svdev;

	TAILQ_REMOVE(&svdev->luns, disk, link);
	free(disk->bdev.name);
	free(disk);

	if (svdev->removed && TAILQ_EMPTY(&svdev->luns)) {
		spdk_io_device_unregister(svdev, virtio_scsi_dev_unregister_cb);
	}

	return 0;
}

static int
bdev_virtio_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
	struct virtio_scsi_disk *disk = ctx;

	virtio_dev_dump_json_info(&disk->svdev->vdev, w);
	return 0;
}

static void
bdev_virtio_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	/* SCSI targets and LUNs are discovered during the scan process, so there
	 * is nothing to save here.
	 */
}

static const struct spdk_bdev_fn_table virtio_fn_table = {
	.destruct = bdev_virtio_disk_destruct,
	.submit_request = bdev_virtio_submit_request,
	.io_type_supported = bdev_virtio_io_type_supported,
	.get_io_channel = bdev_virtio_get_io_channel,
	.dump_info_json = bdev_virtio_dump_info_json,
	.write_config_json = bdev_virtio_write_config_json,
};

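/*
 * Note: the offsets used below assume fixed-format sense data (see SPC):
 * the sense key is in the low nibble of byte 2, the additional sense code
 * in byte 12 and the additional sense code qualifier in byte 13.
 */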
static void
get_scsi_status(struct virtio_scsi_cmd_resp *resp, int *sk, int *asc, int *ascq)
{
	/* see spdk_scsi_task_build_sense_data() for sense data details */
	*sk = 0;
	*asc = 0;
	*ascq = 0;

	if (resp->sense_len < 3) {
		return;
	}

	*sk = resp->sense[2] & 0xf;

	if (resp->sense_len < 13) {
		return;
	}

	*asc = resp->sense[12];

	if (resp->sense_len < 14) {
		return;
	}

	*ascq = resp->sense[13];
}

static void
bdev_virtio_io_cpl(struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
	int sk, asc, ascq;

	get_scsi_status(&io_ctx->resp, &sk, &asc, &ascq);
	spdk_bdev_io_complete_scsi_status(bdev_io, io_ctx->resp.status, sk, asc, ascq);
}

static int
bdev_virtio_poll(void *arg)
{
	struct bdev_virtio_io_channel *ch = arg;
	struct virtio_scsi_dev *svdev = ch->svdev;
	struct virtio_scsi_scan_base *scan_ctx = svdev->scan_ctx;
	void *io[32];
	uint32_t io_len[32];
	uint16_t i, cnt;
	int rc;

	cnt = virtio_recv_pkts(ch->vq, (void **)io, io_len, SPDK_COUNTOF(io));
	for (i = 0; i < cnt; ++i) {
		if (spdk_unlikely(scan_ctx && io[i] == &scan_ctx->io_ctx)) {
			if (svdev->removed) {
				_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
				return SPDK_POLLER_BUSY;
			}

			if (scan_ctx->restart) {
				scan_ctx->restart = false;
				scan_ctx->full_scan = true;
				_virtio_scsi_dev_scan_tgt(scan_ctx, 0);
				continue;
			}

			process_scan_resp(scan_ctx);
			continue;
		}

		bdev_virtio_io_cpl(io[i]);
	}

	/* scan_ctx could have been freed while processing completions above, so
	 * re-read it into the local variable before using it.
	 */
	scan_ctx = svdev->scan_ctx;
	if (spdk_unlikely(scan_ctx && scan_ctx->needs_resend)) {
		if (svdev->removed) {
			_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
			return SPDK_POLLER_BUSY;
		} else if (cnt == 0) {
			return SPDK_POLLER_IDLE;
		}

		rc = send_scan_io(scan_ctx);
		if (rc != 0) {
			assert(scan_ctx->retries > 0);
			scan_ctx->retries--;
			if (scan_ctx->retries == 0) {
				SPDK_ERRLOG("Target scan failed unrecoverably with rc = %d.\n", rc);
				_virtio_scsi_dev_scan_finish(scan_ctx, rc);
			}
		}
	}

	return cnt;
}

static void
bdev_virtio_tmf_cpl_cb(void *ctx)
{
	struct spdk_bdev_io *bdev_io = ctx;
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;

	if (io_ctx->tmf_resp.response == VIRTIO_SCSI_S_OK) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static void
bdev_virtio_tmf_cpl(struct spdk_bdev_io *bdev_io)
{
	spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io), bdev_virtio_tmf_cpl_cb, bdev_io);
}

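/*
 * Note: eventq completions carry hotplug notifications. A TRANSPORT_RESET
 * event with reason RESCAN triggers a single-target scan, while reason
 * REMOVED unregisters the corresponding bdev. If the device reports dropped
 * events (VIRTIO_SCSI_T_EVENTS_MISSED), a full rescan of the device is
 * started as well.
 */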
static void
bdev_virtio_eventq_io_cpl(struct virtio_scsi_dev *svdev, struct virtio_scsi_eventq_io *io)
{
	struct virtio_scsi_event *ev = &io->ev;
	struct virtio_scsi_disk *disk;

	if (ev->lun[0] != 1) {
		SPDK_WARNLOG("Received an event with invalid data layout.\n");
		goto out;
	}

	if (ev->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
		ev->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
		virtio_scsi_dev_scan(svdev, NULL, NULL);
	}

	switch (ev->event) {
	case VIRTIO_SCSI_T_NO_EVENT:
		break;
	case VIRTIO_SCSI_T_TRANSPORT_RESET:
		switch (ev->reason) {
		case VIRTIO_SCSI_EVT_RESET_RESCAN:
			virtio_scsi_dev_scan_tgt(svdev, ev->lun[1]);
			break;
		case VIRTIO_SCSI_EVT_RESET_REMOVED:
			disk = virtio_scsi_dev_get_disk_by_id(svdev, ev->lun[1]);
			if (disk != NULL) {
				spdk_bdev_unregister(&disk->bdev, NULL, NULL);
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

out:
	virtio_scsi_dev_send_eventq_io(svdev->vdev.vqs[VIRTIO_SCSI_EVENTQ], io);
}

static void
bdev_virtio_tmf_abort_nomem_cb(void *ctx)
{
	struct spdk_bdev_io *bdev_io = ctx;

	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
}

static void
bdev_virtio_tmf_abort_ioerr_cb(void *ctx)
{
	struct spdk_bdev_io *bdev_io = ctx;

	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
}

static void
bdev_virtio_tmf_abort(struct spdk_bdev_io *bdev_io, int status)
{
	spdk_msg_fn fn;

	if (status == -ENOMEM) {
		fn = bdev_virtio_tmf_abort_nomem_cb;
	} else {
		fn = bdev_virtio_tmf_abort_ioerr_cb;
	}

	spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io), fn, bdev_io);
}

static int
bdev_virtio_send_tmf_io(struct virtqueue *ctrlq, struct spdk_bdev_io *bdev_io)
{
	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
	int rc;

	rc = virtqueue_req_start(ctrlq, bdev_io, 2);
	if (rc != 0) {
		return rc;
	}

	virtqueue_req_add_iovs(ctrlq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	virtqueue_req_add_iovs(ctrlq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);

	virtqueue_req_flush(ctrlq);
	return 0;
}

static int
bdev_virtio_mgmt_poll(void *arg)
{
	struct virtio_scsi_dev *svdev = arg;
	struct virtio_dev *vdev = &svdev->vdev;
	struct virtqueue *eventq = vdev->vqs[VIRTIO_SCSI_EVENTQ];
	struct virtqueue *ctrlq = vdev->vqs[VIRTIO_SCSI_CONTROLQ];
	struct spdk_ring *send_ring = svdev->ctrlq_ring;
	void *io[16];
	uint32_t io_len[16];
	uint16_t i, cnt;
	int rc;
	int total = 0;

	cnt = spdk_ring_dequeue(send_ring, io, SPDK_COUNTOF(io));
	total += cnt;
	for (i = 0; i < cnt; ++i) {
		rc = bdev_virtio_send_tmf_io(ctrlq, io[i]);
		if (rc != 0) {
			bdev_virtio_tmf_abort(io[i], rc);
		}
	}

	cnt = virtio_recv_pkts(ctrlq, io, io_len, SPDK_COUNTOF(io));
	total += cnt;
	for (i = 0; i < cnt; ++i) {
		bdev_virtio_tmf_cpl(io[i]);
	}

	cnt = virtio_recv_pkts(eventq, io, io_len, SPDK_COUNTOF(io));
	total += cnt;
	for (i = 0; i < cnt; ++i) {
		bdev_virtio_eventq_io_cpl(svdev, io[i]);
	}

	return total;
}

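/*
 * Note: each I/O channel exclusively acquires one of the device's request
 * queues (index >= VIRTIO_SCSI_REQUESTQ) and polls it for completions, so
 * the number of simultaneously usable channels is limited by the number of
 * request queues negotiated with the device.
 */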
static int
bdev_virtio_scsi_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct virtio_scsi_dev *svdev = io_device;
	struct virtio_dev *vdev = &svdev->vdev;
	struct bdev_virtio_io_channel *ch = ctx_buf;
	struct virtqueue *vq;
	int32_t queue_idx;

	queue_idx = virtio_dev_find_and_acquire_queue(vdev, VIRTIO_SCSI_REQUESTQ);
	if (queue_idx < 0) {
		SPDK_ERRLOG("Couldn't get an unused queue for the io_channel.\n");
		return -1;
	}

	vq = vdev->vqs[queue_idx];

	ch->svdev = svdev;
	ch->vq = vq;

	ch->poller = SPDK_POLLER_REGISTER(bdev_virtio_poll, ch, 0);

	return 0;
}

static void
bdev_virtio_scsi_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct bdev_virtio_io_channel *ch = ctx_buf;
	struct virtio_scsi_dev *svdev = ch->svdev;
	struct virtio_dev *vdev = &svdev->vdev;
	struct virtqueue *vq = ch->vq;

	spdk_poller_unregister(&ch->poller);
	virtio_dev_release_queue(vdev, vq->vq_queue_index);
}

static void
_virtio_scsi_dev_scan_finish(struct virtio_scsi_scan_base *base, int errnum)
{
	struct virtio_scsi_dev *svdev = base->svdev;
	size_t bdevs_cnt;
	struct spdk_bdev *bdevs[BDEV_VIRTIO_MAX_TARGET];
	struct virtio_scsi_disk *disk;
	struct virtio_scsi_scan_info *tgt, *next_tgt;

	spdk_put_io_channel(spdk_io_channel_from_ctx(base->channel));
	base->svdev->scan_ctx = NULL;

	TAILQ_FOREACH_SAFE(tgt, &base->scan_queue, tailq, next_tgt) {
		TAILQ_REMOVE(&base->scan_queue, tgt, tailq);
		free(tgt);
	}

	if (base->cb_fn == NULL) {
		spdk_free(base);
		return;
	}

	bdevs_cnt = 0;
	if (errnum == 0) {
		TAILQ_FOREACH(disk, &svdev->luns, link) {
			bdevs[bdevs_cnt] = &disk->bdev;
			bdevs_cnt++;
		}
	}

	base->cb_fn(base->cb_arg, errnum, bdevs, bdevs_cnt);
	spdk_free(base);
}

static int
send_scan_io(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_io_ctx *io_ctx = &base->io_ctx;
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtqueue *vq = base->channel->vq;
	int payload_iov_cnt = base->iov.iov_len > 0 ? 1 : 0;
	int rc;

	req->lun[0] = 1;
	req->lun[1] = base->info.target;

	rc = virtqueue_req_start(vq, io_ctx, 2 + payload_iov_cnt);
	if (rc != 0) {
		base->needs_resend = true;
		return -1;
	}

	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
	virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
	virtqueue_req_add_iovs(vq, &base->iov, payload_iov_cnt, SPDK_VIRTIO_DESC_WR);

	virtqueue_req_flush(vq);
	return 0;
}

static int
send_inquiry(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct spdk_scsi_cdb_inquiry *cdb;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = BDEV_VIRTIO_SCAN_PAYLOAD_SIZE;
	cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;
	cdb->opcode = SPDK_SPC_INQUIRY;
	to_be16(cdb->alloc_len, BDEV_VIRTIO_SCAN_PAYLOAD_SIZE);

	return send_scan_io(base);
}

static int
send_inquiry_vpd(struct virtio_scsi_scan_base *base, uint8_t page_code)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct spdk_scsi_cdb_inquiry *inquiry_cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = BDEV_VIRTIO_SCAN_PAYLOAD_SIZE;
	inquiry_cdb->opcode = SPDK_SPC_INQUIRY;
	inquiry_cdb->evpd = 1;
	inquiry_cdb->page_code = page_code;
	to_be16(inquiry_cdb->alloc_len, base->iov.iov_len);

	return send_scan_io(base);
}

static int
send_read_cap_10(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = 8;
	req->cdb[0] = SPDK_SBC_READ_CAPACITY_10;

	return send_scan_io(base);
}

static int
send_read_cap_16(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));

	base->iov.iov_len = 32;
	req->cdb[0] = SPDK_SPC_SERVICE_ACTION_IN_16;
	req->cdb[1] = SPDK_SBC_SAI_READ_CAPACITY_16;
	to_be32(&req->cdb[10], base->iov.iov_len);

	return send_scan_io(base);
}

static int
send_test_unit_ready(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));
	req->cdb[0] = SPDK_SPC_TEST_UNIT_READY;
	base->iov.iov_len = 0;

	return send_scan_io(base);
}

static int
send_start_stop_unit(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;

	memset(req, 0, sizeof(*req));
	req->cdb[0] = SPDK_SBC_START_STOP_UNIT;
	req->cdb[4] = SPDK_SBC_START_STOP_UNIT_START_BIT;
	base->iov.iov_len = 0;

	return send_scan_io(base);
}

static int
process_scan_start_stop_unit(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;

	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		return send_inquiry_vpd(base, SPDK_SPC_VPD_SUPPORTED_VPD_PAGES);
	}

	return -1;
}

static int
process_scan_test_unit_ready(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	int sk, asc, ascq;

	get_scsi_status(resp, &sk, &asc, &ascq);

	/* check the response; get VPD if the unit is spun up, otherwise send START STOP UNIT */
	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		return send_inquiry_vpd(base, SPDK_SPC_VPD_SUPPORTED_VPD_PAGES);
	} else if (resp->response == VIRTIO_SCSI_S_OK &&
		   resp->status == SPDK_SCSI_STATUS_CHECK_CONDITION &&
		   sk == SPDK_SCSI_SENSE_UNIT_ATTENTION &&
		   asc == SPDK_SCSI_ASC_LOGICAL_UNIT_NOT_READY) {
		return send_start_stop_unit(base);
	} else {
		return -1;
	}
}

static int
process_scan_inquiry_standard(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	struct spdk_scsi_cdb_inquiry_data *inquiry_data =
		(struct spdk_scsi_cdb_inquiry_data *)base->payload;

	if (resp->status != SPDK_SCSI_STATUS_GOOD) {
		return -1;
	}

	/* check to make sure it's a supported device */
	if (inquiry_data->peripheral_device_type != SPDK_SPC_PERIPHERAL_DEVICE_TYPE_DISK ||
	    inquiry_data->peripheral_qualifier != SPDK_SPC_PERIPHERAL_QUALIFIER_CONNECTED) {
		SPDK_WARNLOG("Unsupported peripheral device type 0x%02x (qualifier 0x%02x)\n",
			     inquiry_data->peripheral_device_type,
			     inquiry_data->peripheral_qualifier);
		return -1;
	}

	return send_test_unit_ready(base);
}

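/*
 * Note: the parsing below assumes the standard INQUIRY VPD page 00h layout:
 * the page length is a big-endian 16-bit value at bytes 2-3 and the list of
 * supported page codes starts at byte 4.
 */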
static int
process_scan_inquiry_vpd_supported_vpd_pages(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	bool block_provisioning_page_supported = false;

	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		const uint8_t *vpd_data = base->payload;
		const uint8_t *supported_vpd_pages = vpd_data + 4;
		uint16_t page_length;
		uint16_t num_supported_pages;
		uint16_t i;

		page_length = from_be16(vpd_data + 2);
		num_supported_pages = spdk_min(page_length, base->iov.iov_len - 4);

		for (i = 0; i < num_supported_pages; i++) {
			if (supported_vpd_pages[i] == SPDK_SPC_VPD_BLOCK_THIN_PROVISION) {
				block_provisioning_page_supported = true;
				break;
			}
		}
	}

	if (block_provisioning_page_supported) {
		return send_inquiry_vpd(base, SPDK_SPC_VPD_BLOCK_THIN_PROVISION);
	} else {
		return send_read_cap_10(base);
	}
}

static int
process_scan_inquiry_vpd_block_thin_provision(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;

	base->info.unmap_supported = false;

	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
		uint8_t *vpd_data = base->payload;

		base->info.unmap_supported = !!(vpd_data[5] & SPDK_SCSI_UNMAP_LBPU);
	}

	SPDK_INFOLOG(virtio, "Target %u: unmap supported = %d\n",
		     base->info.target, (int)base->info.unmap_supported);

	return send_read_cap_10(base);
}

static int
process_scan_inquiry(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct spdk_scsi_cdb_inquiry *inquiry_cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;

	if ((inquiry_cdb->evpd & 1) == 0) {
		return process_scan_inquiry_standard(base);
	}

	switch (inquiry_cdb->page_code) {
	case SPDK_SPC_VPD_SUPPORTED_VPD_PAGES:
		return process_scan_inquiry_vpd_supported_vpd_pages(base);
	case SPDK_SPC_VPD_BLOCK_THIN_PROVISION:
		return process_scan_inquiry_vpd_block_thin_provision(base);
	default:
		SPDK_DEBUGLOG(virtio, "Unexpected VPD page 0x%02x\n", inquiry_cdb->page_code);
		return -1;
	}
}

static void
bdev_virtio_disk_notify_remove(struct virtio_scsi_disk *disk)
{
	disk->removed = true;
	spdk_bdev_close(disk->notify_desc);
}

static void
bdev_virtio_disk_notify_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				 void *event_ctx)
{
	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		bdev_virtio_disk_notify_remove(event_ctx);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

/* To be called only from the thread performing target scan */
static int
virtio_scsi_dev_add_tgt(struct virtio_scsi_dev *svdev, struct virtio_scsi_scan_info *info)
{
	struct virtio_scsi_disk *disk;
	struct spdk_bdev *bdev;
	int rc;

	TAILQ_FOREACH(disk, &svdev->luns, link) {
		if (disk->info.target == info->target) {
			/* Target is already attached and param change is not supported */
			return 0;
		}
	}

	if (info->block_size == 0 || info->num_blocks == 0) {
		SPDK_ERRLOG("%s: invalid target %u: bs=%"PRIu32" blocks=%"PRIu64"\n",
			    svdev->vdev.name, info->target, info->block_size, info->num_blocks);
		return -EINVAL;
	}

	disk = calloc(1, sizeof(*disk));
	if (disk == NULL) {
		SPDK_ERRLOG("could not allocate disk\n");
		return -ENOMEM;
	}

	disk->svdev = svdev;
	memcpy(&disk->info, info, sizeof(*info));

	bdev = &disk->bdev;
	bdev->name = spdk_sprintf_alloc("%st%"PRIu8, svdev->vdev.name, info->target);
	if (bdev->name == NULL) {
		SPDK_ERRLOG("Couldn't alloc memory for the bdev name.\n");
		free(disk);
		return -ENOMEM;
	}

	bdev->product_name = "Virtio SCSI Disk";
	bdev->write_cache = 0;
	bdev->blocklen = disk->info.block_size;
	bdev->blockcnt = disk->info.num_blocks;

	bdev->ctxt = disk;
	bdev->fn_table = &virtio_fn_table;
	bdev->module = &virtio_scsi_if;

	rc = spdk_bdev_register(&disk->bdev);
	if (rc) {
		SPDK_ERRLOG("Failed to register bdev name=%s\n", disk->bdev.name);
		free(bdev->name);
		free(disk);
		return rc;
	}

	rc = spdk_bdev_open_ext(bdev->name, false, bdev_virtio_disk_notify_event_cb,
				disk, &disk->notify_desc);
	if (rc) {
		assert(false);
	}

	TAILQ_INSERT_TAIL(&svdev->luns, disk, link);
	return 0;
}

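/*
 * Note: READ CAPACITY (10) returns the last addressable LBA, hence the
 * "+ 1" when computing the block count below. A returned value of
 * 0xffffffff means the capacity does not fit in 32 bits and READ
 * CAPACITY (16) must be issued instead.
 */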
static int
process_read_cap_10(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	uint64_t max_block;
	uint32_t block_size;
	uint8_t target_id = req->lun[1];
	int rc;

	if (resp->response != VIRTIO_SCSI_S_OK || resp->status != SPDK_SCSI_STATUS_GOOD) {
		SPDK_ERRLOG("READ CAPACITY (10) failed for target %"PRIu8".\n", target_id);
		return -1;
	}

	block_size = from_be32(base->payload + 4);
	max_block = from_be32(base->payload);

	if (max_block == 0xffffffff) {
		return send_read_cap_16(base);
	}

	base->info.num_blocks = (uint64_t)max_block + 1;
	base->info.block_size = block_size;

	rc = virtio_scsi_dev_add_tgt(base->svdev, &base->info);
	if (rc != 0) {
		return rc;
	}

	return _virtio_scsi_dev_scan_next(base, 0);
}

static int
process_read_cap_16(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	uint8_t target_id = req->lun[1];
	int rc;

	if (resp->response != VIRTIO_SCSI_S_OK || resp->status != SPDK_SCSI_STATUS_GOOD) {
		SPDK_ERRLOG("READ CAPACITY (16) failed for target %"PRIu8".\n", target_id);
		return -1;
	}

	base->info.num_blocks = from_be64(base->payload) + 1;
	base->info.block_size = from_be32(base->payload + 8);
	rc = virtio_scsi_dev_add_tgt(base->svdev, &base->info);
	if (rc != 0) {
		return rc;
	}

	return _virtio_scsi_dev_scan_next(base, 0);
}

static void
process_scan_resp(struct virtio_scsi_scan_base *base)
{
	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
	int rc, sk, asc, ascq;
	uint8_t target_id;

	if (base->io_ctx.iov_req.iov_len < sizeof(struct virtio_scsi_cmd_req) ||
	    base->io_ctx.iov_resp.iov_len < sizeof(struct virtio_scsi_cmd_resp)) {
		SPDK_ERRLOG("Received target scan message with invalid length.\n");
		_virtio_scsi_dev_scan_next(base, -EIO);
		return;
	}

	get_scsi_status(resp, &sk, &asc, &ascq);
	target_id = req->lun[1];

	if (resp->response == VIRTIO_SCSI_S_BAD_TARGET ||
	    resp->response == VIRTIO_SCSI_S_INCORRECT_LUN) {
		_virtio_scsi_dev_scan_next(base, -ENODEV);
		return;
	}

	if (resp->response != VIRTIO_SCSI_S_OK ||
	    (resp->status == SPDK_SCSI_STATUS_CHECK_CONDITION &&
	     sk != SPDK_SCSI_SENSE_ILLEGAL_REQUEST)) {
		assert(base->retries > 0);
		base->retries--;
		if (base->retries == 0) {
			SPDK_NOTICELOG("Target %"PRIu8" is present, but unavailable.\n", target_id);
			SPDK_LOGDUMP(virtio, "CDB", req->cdb, sizeof(req->cdb));
			SPDK_LOGDUMP(virtio, "SENSE DATA", resp->sense, sizeof(resp->sense));
			_virtio_scsi_dev_scan_next(base, -EBUSY);
			return;
		}

		/* resend the same request */
		rc = send_scan_io(base);
		if (rc != 0) {
			/* Let response poller do the resend */
		}
		return;
	}

	base->retries = SCAN_REQUEST_RETRIES;

	switch (req->cdb[0]) {
	case SPDK_SPC_INQUIRY:
		rc = process_scan_inquiry(base);
		break;
	case SPDK_SPC_TEST_UNIT_READY:
		rc = process_scan_test_unit_ready(base);
		break;
	case SPDK_SBC_START_STOP_UNIT:
		rc = process_scan_start_stop_unit(base);
		break;
	case SPDK_SBC_READ_CAPACITY_10:
		rc = process_read_cap_10(base);
		break;
	case SPDK_SPC_SERVICE_ACTION_IN_16:
		rc = process_read_cap_16(base);
		break;
	default:
		SPDK_ERRLOG("Received invalid target scan message: cdb[0] = %"PRIu8".\n", req->cdb[0]);
		rc = -1;
		break;
	}

	if (rc != 0) {
		if (base->needs_resend) {
			return; /* Let response poller do the resend */
		}

		_virtio_scsi_dev_scan_next(base, rc);
	}
}

static int
_virtio_scsi_dev_scan_next(struct virtio_scsi_scan_base *base, int rc)
{
	struct virtio_scsi_scan_info *next;
	struct virtio_scsi_disk *disk;
	uint8_t target_id;

	if (base->full_scan) {
		if (rc != 0) {
			disk = virtio_scsi_dev_get_disk_by_id(base->svdev,
							      base->info.target);
			if (disk != NULL) {
				spdk_bdev_unregister(&disk->bdev, NULL, NULL);
			}
		}

		target_id = base->info.target + 1;
		if (target_id < BDEV_VIRTIO_MAX_TARGET) {
			_virtio_scsi_dev_scan_tgt(base, target_id);
			return 0;
		}

		base->full_scan = false;
	}

	next = TAILQ_FIRST(&base->scan_queue);
	if (next == NULL) {
		_virtio_scsi_dev_scan_finish(base, 0);
		return 0;
	}

	TAILQ_REMOVE(&base->scan_queue, next, tailq);
	target_id = next->target;
	free(next);

	_virtio_scsi_dev_scan_tgt(base, target_id);
	return 0;
}

static int
_virtio_scsi_dev_scan_init(struct virtio_scsi_dev *svdev)
{
	struct virtio_scsi_scan_base *base;
	struct spdk_io_channel *io_ch;
	struct virtio_scsi_io_ctx *io_ctx;
	struct virtio_scsi_cmd_req *req;
	struct virtio_scsi_cmd_resp *resp;

	io_ch = spdk_get_io_channel(svdev);
	if (io_ch == NULL) {
		return -EBUSY;
	}

	base = spdk_zmalloc(sizeof(*base), 64, NULL,
			    SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (base == NULL) {
		SPDK_ERRLOG("couldn't allocate memory for scsi target scan.\n");
		spdk_put_io_channel(io_ch);
		return -ENOMEM;
	}

	base->svdev = svdev;

	base->channel = spdk_io_channel_get_ctx(io_ch);
	TAILQ_INIT(&base->scan_queue);
	svdev->scan_ctx = base;

	base->iov.iov_base = base->payload;
	io_ctx = &base->io_ctx;
	req = &io_ctx->req;
	resp = &io_ctx->resp;
	io_ctx->iov_req.iov_base = req;
	io_ctx->iov_req.iov_len = sizeof(*req);
	io_ctx->iov_resp.iov_base = resp;
	io_ctx->iov_resp.iov_len = sizeof(*resp);

	base->retries = SCAN_REQUEST_RETRIES;
	return 0;
}

static void
_virtio_scsi_dev_scan_tgt(struct virtio_scsi_scan_base *base, uint8_t target)
{
	int rc;

	memset(&base->info, 0, sizeof(base->info));
	base->info.target = target;

	rc = send_inquiry(base);
	if (rc) {
		/* Let response poller do the resend */
	}
}

static int
virtio_scsi_dev_scan(struct virtio_scsi_dev *svdev, bdev_virtio_create_cb cb_fn,
		     void *cb_arg)
{
	struct virtio_scsi_scan_base *base;
	struct virtio_scsi_scan_info *tgt, *next_tgt;
	int rc;

	if (svdev->scan_ctx) {
		if (svdev->scan_ctx->full_scan) {
			return -EEXIST;
		}

		/* We're about to start a full rescan, so there's no need
		 * to scan particular targets afterwards.
		 */
		TAILQ_FOREACH_SAFE(tgt, &svdev->scan_ctx->scan_queue, tailq, next_tgt) {
			TAILQ_REMOVE(&svdev->scan_ctx->scan_queue, tgt, tailq);
			free(tgt);
		}

		svdev->scan_ctx->cb_fn = cb_fn;
		svdev->scan_ctx->cb_arg = cb_arg;
		svdev->scan_ctx->restart = true;
		return 0;
	}

	rc = _virtio_scsi_dev_scan_init(svdev);
	if (rc != 0) {
		return rc;
	}

	base = svdev->scan_ctx;
	base->cb_fn = cb_fn;
	base->cb_arg = cb_arg;
	base->full_scan = true;

	_virtio_scsi_dev_scan_tgt(base, 0);
	return 0;
}

static int
virtio_scsi_dev_scan_tgt(struct virtio_scsi_dev *svdev, uint8_t target)
{
	struct virtio_scsi_scan_base *base;
	struct virtio_scsi_scan_info *info;
	int rc;

	base = svdev->scan_ctx;
	if (base) {
		info = calloc(1, sizeof(*info));
		if (info == NULL) {
			SPDK_ERRLOG("calloc failed\n");
			return -ENOMEM;
		}

		info->target = target;
		TAILQ_INSERT_TAIL(&base->scan_queue, info, tailq);
		return 0;
	}

	rc = _virtio_scsi_dev_scan_init(svdev);
	if (rc != 0) {
		return rc;
	}

	base = svdev->scan_ctx;
	base->full_scan = true;
	_virtio_scsi_dev_scan_tgt(base, target);
	return 0;
}

static int
bdev_virtio_initialize(void)
{
	return 0;
}

static void
_virtio_scsi_dev_unregister_cb(void *io_device)
{
	struct virtio_scsi_dev *svdev = io_device;
	struct virtio_dev *vdev = &svdev->vdev;
	bool finish_module;
	bdev_virtio_remove_cb remove_cb;
	void *remove_ctx;

	assert(spdk_ring_count(svdev->ctrlq_ring) == 0);
	spdk_ring_free(svdev->ctrlq_ring);
	spdk_poller_unregister(&svdev->mgmt_poller);

	virtio_dev_release_queue(vdev, VIRTIO_SCSI_EVENTQ);
	virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);

	virtio_dev_stop(vdev);
	virtio_dev_destruct(vdev);

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_REMOVE(&g_virtio_scsi_devs, svdev, tailq);
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	remove_cb = svdev->remove_cb;
	remove_ctx = svdev->remove_ctx;
	spdk_free(svdev->eventq_ios);
	free(svdev);

	if (remove_cb) {
		remove_cb(remove_ctx, 0);
	}

	finish_module = TAILQ_EMPTY(&g_virtio_scsi_devs);

	if (g_bdev_virtio_finish && finish_module) {
		spdk_bdev_module_fini_done();
	}
}

static void
virtio_scsi_dev_unregister_cb(void *io_device)
{
	struct virtio_scsi_dev *svdev = io_device;
	struct spdk_thread *thread;

	thread = virtio_dev_queue_get_thread(&svdev->vdev, VIRTIO_SCSI_CONTROLQ);
	spdk_thread_send_msg(thread, _virtio_scsi_dev_unregister_cb, io_device);
}

static void
virtio_scsi_dev_remove(struct virtio_scsi_dev *svdev,
		       bdev_virtio_remove_cb cb_fn, void *cb_arg)
{
	struct virtio_scsi_disk *disk, *disk_tmp;
	bool do_remove = true;

	if (svdev->removed) {
		if (cb_fn) {
			cb_fn(cb_arg, -EBUSY);
		}
		return;
	}

	svdev->remove_cb = cb_fn;
	svdev->remove_ctx = cb_arg;
	svdev->removed = true;

	if (svdev->scan_ctx) {
		/* The removal will continue after we receive a pending scan I/O. */
		return;
	}

	TAILQ_FOREACH_SAFE(disk, &svdev->luns, link, disk_tmp) {
		if (!disk->removed) {
			spdk_bdev_unregister(&disk->bdev, NULL, NULL);
		}
		do_remove = false;
	}

	if (do_remove) {
		spdk_io_device_unregister(svdev, virtio_scsi_dev_unregister_cb);
	}
}

static void
bdev_virtio_finish(void)
{
	struct virtio_scsi_dev *svdev, *next;

	g_bdev_virtio_finish = true;

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	if (TAILQ_EMPTY(&g_virtio_scsi_devs)) {
		pthread_mutex_unlock(&g_virtio_scsi_mutex);
		spdk_bdev_module_fini_done();
		return;
	}

	/* Defer module finish until all controllers are removed. */
	TAILQ_FOREACH_SAFE(svdev, &g_virtio_scsi_devs, tailq, next) {
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}
	pthread_mutex_unlock(&g_virtio_scsi_mutex);
}

int
bdev_virtio_user_scsi_dev_create(const char *base_name, const char *path,
				 unsigned num_queues, unsigned queue_size,
				 bdev_virtio_create_cb cb_fn, void *cb_arg)
{
	struct virtio_scsi_dev *svdev;
	int rc;

	svdev = virtio_user_scsi_dev_create(base_name, path, num_queues, queue_size);
	if (svdev == NULL) {
		return -1;
	}

	rc = virtio_scsi_dev_scan(svdev, cb_fn, cb_arg);
	if (rc) {
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}

	return rc;
}

int
bdev_vfio_user_scsi_dev_create(const char *base_name, const char *path,
			       bdev_virtio_create_cb cb_fn, void *cb_arg)
{
	struct virtio_scsi_dev *svdev;
	uint32_t num_queues = 0;
	int rc;

	svdev = calloc(1, sizeof(*svdev));
	if (svdev == NULL) {
		SPDK_ERRLOG("calloc failed for virtio device %s: %s\n", base_name, path);
		return -ENOMEM;
	}

	rc = virtio_vfio_user_dev_init(&svdev->vdev, base_name, path);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create %s as virtio device\n", path);
		free(svdev);
		return -EFAULT;
	}

	rc = virtio_dev_read_dev_config(&svdev->vdev, offsetof(struct virtio_scsi_config, num_queues),
					&num_queues, sizeof(num_queues));
	if (rc) {
		SPDK_ERRLOG("%s: config read failed: %s\n", base_name, spdk_strerror(-rc));
		virtio_dev_destruct(&svdev->vdev);
		free(svdev);
		return rc;
	}

	if (num_queues < SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED) {
		SPDK_ERRLOG("%s: invalid num_queues %u\n", base_name, num_queues);
		virtio_dev_destruct(&svdev->vdev);
		free(svdev);
		return -EINVAL;
	}

	rc = virtio_scsi_dev_init(svdev, num_queues, VIRTIO_SCSI_DEV_SUPPORTED_FEATURES);
	if (rc != 0) {
		virtio_dev_destruct(&svdev->vdev);
		free(svdev);
		return -EFAULT;
	}

	rc = virtio_scsi_dev_scan(svdev, cb_fn, cb_arg);
	if (rc) {
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}

	return rc;
}

struct bdev_virtio_pci_dev_create_ctx {
	const char *name;
	bdev_virtio_create_cb cb_fn;
	void *cb_arg;
};

static int
bdev_virtio_pci_scsi_dev_create_cb(struct virtio_pci_ctx *pci_ctx, void *ctx)
{
	struct virtio_scsi_dev *svdev;
	struct bdev_virtio_pci_dev_create_ctx *create_ctx = ctx;
	int rc;

	svdev = virtio_pci_scsi_dev_create(create_ctx->name, pci_ctx);
	if (svdev == NULL) {
		return -1;
	}

	rc = virtio_scsi_dev_scan(svdev, create_ctx->cb_fn, create_ctx->cb_arg);
	if (rc) {
		svdev->vdev.ctx = NULL;
		virtio_scsi_dev_remove(svdev, NULL, NULL);
	}

	return rc;
}

int
bdev_virtio_pci_scsi_dev_create(const char *name, struct spdk_pci_addr *pci_addr,
				bdev_virtio_create_cb cb_fn, void *cb_arg)
{
	struct bdev_virtio_pci_dev_create_ctx create_ctx;

	create_ctx.name = name;
	create_ctx.cb_fn = cb_fn;
	create_ctx.cb_arg = cb_arg;

	return virtio_pci_dev_attach(bdev_virtio_pci_scsi_dev_create_cb, &create_ctx,
				     VIRTIO_ID_SCSI, pci_addr);
}

int
bdev_virtio_scsi_dev_remove(const char *name, bdev_virtio_remove_cb cb_fn, void *cb_arg)
{
	struct virtio_scsi_dev *svdev;

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		if (strcmp(svdev->vdev.name, name) == 0) {
			break;
		}
	}

	if (svdev == NULL) {
		pthread_mutex_unlock(&g_virtio_scsi_mutex);
		SPDK_ERRLOG("Cannot find Virtio-SCSI device named '%s'\n", name);
		return -ENODEV;
	}

	virtio_scsi_dev_remove(svdev, cb_fn, cb_arg);
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	return 0;
}

void
bdev_virtio_scsi_dev_list(struct spdk_json_write_ctx *w)
{
	struct virtio_scsi_dev *svdev;

	spdk_json_write_array_begin(w);

	pthread_mutex_lock(&g_virtio_scsi_mutex);
	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
		spdk_json_write_object_begin(w);

		spdk_json_write_named_string(w, "name", svdev->vdev.name);

		virtio_dev_dump_json_info(&svdev->vdev, w);

		spdk_json_write_object_end(w);
	}
	pthread_mutex_unlock(&g_virtio_scsi_mutex);

	spdk_json_write_array_end(w);
}

SPDK_LOG_REGISTER_COMPONENT(virtio)