1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "spdk/stdinc.h"

#include <linux/virtio_scsi.h>

#include "spdk/env.h"
#include "spdk/io_channel.h"
#include "spdk/scsi.h"
#include "spdk/scsi_spec.h"
#include "spdk/conf.h"
#include "spdk/event.h"
#include "spdk/util.h"
#include "spdk/likely.h"

#include "spdk/vhost.h"
#include "vhost_internal.h"

/* Features supported by SPDK VHOST lib. */
#define SPDK_VHOST_SCSI_FEATURES	(SPDK_VHOST_FEATURES | \
					(1ULL << VIRTIO_SCSI_F_INOUT) | \
					(1ULL << VIRTIO_SCSI_F_HOTPLUG) | \
					(1ULL << VIRTIO_SCSI_F_CHANGE ) | \
					(1ULL << VIRTIO_SCSI_F_T10_PI ))

/* Features that are specified in VIRTIO SCSI but currently not supported:
 * - Live migration not supported yet
 * - T10 PI
 */
#define SPDK_VHOST_SCSI_DISABLED_FEATURES	(SPDK_VHOST_DISABLED_FEATURES | \
						(1ULL << VIRTIO_SCSI_F_T10_PI ))

/* Period (us) of the management poller that services the control queue,
 * signals the event queue and finalizes hot-detached targets.
 */
#define MGMT_POLL_PERIOD_US (1000 * 5)

/* Fixed virtio-scsi virtqueue indices; request queues start at index 2. */
#define VIRTIO_SCSI_CONTROLQ	0
#define VIRTIO_SCSI_EVENTQ	1
#define VIRTIO_SCSI_REQUESTQ	2

/* Per-target hot-detach bookkeeping. */
struct spdk_scsi_dev_vhost_state {
	bool removed;			/* target has been queued for hot-detach */
	spdk_vhost_event_fn remove_cb;	/* fired once the target is finally destructed */
	void *remove_ctx;		/* opaque argument for remove_cb */
};

/* A vhost-scsi controller: the generic vhost device plus its SCSI targets
 * and the two pollers that drive I/O and management work.
 */
struct spdk_vhost_scsi_dev {
	struct spdk_vhost_dev vdev;
	struct spdk_scsi_dev *scsi_dev[SPDK_VHOST_SCSI_CTRLR_MAX_DEVS];
	struct spdk_scsi_dev_vhost_state scsi_dev_state[SPDK_VHOST_SCSI_CTRLR_MAX_DEVS];

	struct spdk_poller *requestq_poller;	/* drives the request queues (vdev_worker) */
	struct spdk_poller *mgmt_poller;	/* drives controlq/eventq (vdev_mgmt_worker) */
} __rte_cache_aligned;

/* Per-request state; one instance per virtqueue descriptor slot
 * (see alloc_task_pool).
 */
struct spdk_vhost_scsi_task {
	struct spdk_scsi_task scsi;
	struct iovec iovs[SPDK_VHOST_IOVS_MAX];

	union {
		/* command response, used for requestq tasks */
		struct virtio_scsi_cmd_resp *resp;
		/* task-management response, used for controlq tasks */
		struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	};

	struct spdk_vhost_scsi_dev *svdev;
	struct spdk_scsi_dev *scsi_dev;

	/** Number of bytes that were written. */
	uint32_t used_len;

	int req_idx;

	/* If set, the task is currently used for I/O processing.
 */
	bool used;

	struct spdk_vhost_virtqueue *vq;
};

static int spdk_vhost_scsi_start(struct spdk_vhost_dev *, void *);
static int spdk_vhost_scsi_stop(struct spdk_vhost_dev *, void *);
static void spdk_vhost_scsi_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
static int spdk_vhost_scsi_dev_remove(struct spdk_vhost_dev *vdev);

/* Backend ops table registered with the generic vhost layer. */
const struct spdk_vhost_dev_backend spdk_vhost_scsi_device_backend = {
	.virtio_features = SPDK_VHOST_SCSI_FEATURES,
	.disabled_features = SPDK_VHOST_SCSI_DISABLED_FEATURES,
	.start_device = spdk_vhost_scsi_start,
	.stop_device = spdk_vhost_scsi_stop,
	.dump_config_json = spdk_vhost_scsi_config_json,
	.remove_device = spdk_vhost_scsi_dev_remove,
};

/* Drop the reference on the embedded SCSI task;
 * spdk_vhost_scsi_task_free_cb() runs when the last reference is released.
 */
static void
spdk_vhost_scsi_task_put(struct spdk_vhost_scsi_task *task)
{
	spdk_scsi_task_put(&task->scsi);
}

/* Final free callback: decrement the controller's in-flight task counter
 * and mark the descriptor slot reusable.
 */
static void
spdk_vhost_scsi_task_free_cb(struct spdk_scsi_task *scsi_task)
{
	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);

	assert(task->svdev->vdev.task_cnt > 0);
	task->svdev->vdev.task_cnt--;
	task->used = false;
}

/* Destruct targets previously marked for hot-detach once they have no
 * pending SCSI tasks, then invoke the user's remove callback (if any).
 * Called periodically from the management poller.
 */
static void
process_removed_devs(struct spdk_vhost_scsi_dev *svdev)
{
	struct spdk_scsi_dev *dev;
	struct spdk_scsi_dev_vhost_state *state;
	int i;

	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; ++i) {
		dev = svdev->scsi_dev[i];
		state = &svdev->scsi_dev_state[i];

		if (dev && state->removed && !spdk_scsi_dev_has_pending_tasks(dev)) {
			spdk_scsi_dev_free_io_channels(dev);
			svdev->scsi_dev[i] = NULL;
			spdk_scsi_dev_destruct(dev);
			if (state->remove_cb) {
				state->remove_cb(&svdev->vdev, state->remove_ctx);
				state->remove_cb = NULL;
			}
			SPDK_NOTICELOG("%s: hot-detached device 'Dev %u'.\n", svdev->vdev.name, i);
		}
	}
}

/* Post a virtio-scsi event (e.g. transport reset) for the given target on
 * the event queue.  Best-effort: on failure it logs and still returns the
 * descriptor to the guest (with zero written length).
 */
static void
eventq_enqueue(struct spdk_vhost_scsi_dev *svdev, unsigned
scsi_dev_num, uint32_t event,
	       uint32_t reason)
{
	struct spdk_vhost_virtqueue *vq;
	struct vring_desc *desc, *desc_table;
	struct virtio_scsi_event *desc_ev;
	uint32_t desc_table_size, req_size = 0;
	uint16_t req;
	int rc;

	assert(scsi_dev_num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
	vq = &svdev->vdev.virtqueue[VIRTIO_SCSI_EVENTQ];

	/* The guest pre-posts empty buffers on the eventq; take one. */
	if (spdk_vhost_vq_avail_ring_get(vq, &req, 1) != 1) {
		SPDK_ERRLOG("Controller %s: Failed to send virtio event (no avail ring entries?).\n",
			    svdev->vdev.name);
		return;
	}

	rc = spdk_vhost_vq_get_desc(&svdev->vdev, vq, req, &desc, &desc_table, &desc_table_size);
	if (rc != 0 || desc->len < sizeof(*desc_ev)) {
		SPDK_ERRLOG("Controller %s: Invalid eventq descriptor at index %"PRIu16".\n",
			    svdev->vdev.name, req);
		goto out;
	}

	desc_ev = spdk_vhost_gpa_to_vva(&svdev->vdev, desc->addr);
	if (desc_ev == NULL) {
		SPDK_ERRLOG("Controller %s: Eventq descriptor at index %"PRIu16" points to unmapped guest memory address %p.\n",
			    svdev->vdev.name, req, (void *)(uintptr_t)desc->addr);
		goto out;
	}

	desc_ev->event = event;
	desc_ev->lun[0] = 1;
	desc_ev->lun[1] = scsi_dev_num;
	/* virtio LUN id 0 can refer either to the entire device
	 * or actual LUN 0 (the only supported by vhost for now)
	 */
	desc_ev->lun[2] = 0 >> 8;
	desc_ev->lun[3] = 0 & 0xFF;
	/* virtio doesn't specify any strict format for LUN id (bytes 2 and 3)
	 * current implementation relies on linux kernel sources
	 */
	memset(&desc_ev->lun[4], 0, 4);
	desc_ev->reason = reason;
	req_size = sizeof(*desc_ev);

out:
	/* req_size stays 0 on the error paths, so the guest sees an empty event. */
	spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, req, req_size);
}

/* Return the task's descriptor to the used ring (completing it from the
 * guest's point of view) and release the task.
 */
static void
submit_completion(struct spdk_vhost_scsi_task *task)
{
	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx,
					task->used_len);
	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "Finished task (%p) req_idx=%d\n",
		      task, task->req_idx);

	spdk_vhost_scsi_task_put(task);
}

/* Completion callback for management (TMF) tasks; the response code was
 * already filled in by mgmt_task_submit() / process_ctrl_request().
 */
static void
spdk_vhost_scsi_task_mgmt_cpl(struct spdk_scsi_task *scsi_task)
{
	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);

	submit_completion(task);
}

/* Completion callback for regular I/O tasks: copy status, sense data and
 * residual count into the virtio response before posting to the used ring.
 */
static void
spdk_vhost_scsi_task_cpl(struct spdk_scsi_task *scsi_task)
{
	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);

	/* The SCSI task has completed.  Do final processing and then post
	   notification to the virtqueue's "used" ring.
	 */
	task->resp->status = task->scsi.status;

	if (task->scsi.status != SPDK_SCSI_STATUS_GOOD) {
		memcpy(task->resp->sense, task->scsi.sense_data, task->scsi.sense_data_len);
		task->resp->sense_len = task->scsi.sense_data_len;
	}
	assert(task->scsi.transfer_len == task->scsi.length);
	task->resp->resid = task->scsi.length - task->scsi.data_transferred;

	submit_completion(task);
}

/* Hand an I/O task to the SCSI layer; completion arrives via
 * spdk_vhost_scsi_task_cpl().
 */
static void
task_submit(struct spdk_vhost_scsi_task *task)
{
	task->resp->response = VIRTIO_SCSI_S_OK;
	spdk_scsi_dev_queue_task(task->scsi_dev, &task->scsi);
}

/* Hand a task-management request (e.g. LUN reset) to the SCSI layer;
 * completion arrives via spdk_vhost_scsi_task_mgmt_cpl().
 */
static void
mgmt_task_submit(struct spdk_vhost_scsi_task *task, enum spdk_scsi_task_func func)
{
	task->tmf_resp->response = VIRTIO_SCSI_S_OK;
	spdk_scsi_dev_queue_mgmt_task(task->scsi_dev, &task->scsi, func);
}

/* Complete a malformed request immediately, writing back whatever
 * used_len was accumulated so far (possibly 0).
 */
static void
invalid_request(struct spdk_vhost_scsi_task *task)
{
	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx,
					task->used_len);
	spdk_vhost_scsi_task_put(task);

	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "Invalid request (status=%" PRIu8")\n",
		      task->resp ?
task->resp->response : -1); 276 } 277 278 static int 279 spdk_vhost_scsi_task_init_target(struct spdk_vhost_scsi_task *task, const __u8 *lun) 280 { 281 struct spdk_scsi_dev *dev; 282 uint16_t lun_id = (((uint16_t)lun[2] << 8) | lun[3]) & 0x3FFF; 283 284 SPDK_TRACEDUMP(SPDK_LOG_VHOST_SCSI_QUEUE, "LUN", lun, 8); 285 286 /* First byte must be 1 and second is target */ 287 if (lun[0] != 1 || lun[1] >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) { 288 return -1; 289 } 290 291 dev = task->svdev->scsi_dev[lun[1]]; 292 task->scsi_dev = dev; 293 if (dev == NULL || task->svdev->scsi_dev_state[lun[1]].removed) { 294 /* If dev has been hotdetached, return 0 to allow sending 295 * additional hotremove event via sense codes. 296 */ 297 return task->svdev->scsi_dev_state[lun[1]].removed ? 0 : -1; 298 } 299 300 task->scsi.target_port = spdk_scsi_dev_find_port_by_id(task->scsi_dev, 0); 301 task->scsi.lun = spdk_scsi_dev_get_lun(dev, lun_id); 302 return 0; 303 } 304 305 static void 306 process_ctrl_request(struct spdk_vhost_scsi_task *task) 307 { 308 struct spdk_vhost_dev *vdev = &task->svdev->vdev; 309 struct vring_desc *desc, *desc_table; 310 struct virtio_scsi_ctrl_tmf_req *ctrl_req; 311 struct virtio_scsi_ctrl_an_resp *an_resp; 312 uint32_t desc_table_size, used_len = 0; 313 int rc; 314 315 spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_mgmt_cpl, spdk_vhost_scsi_task_free_cb); 316 rc = spdk_vhost_vq_get_desc(vdev, task->vq, task->req_idx, &desc, &desc_table, &desc_table_size); 317 if (spdk_unlikely(rc != 0)) { 318 SPDK_ERRLOG("%s: Invalid controlq descriptor at index %d.\n", 319 vdev->name, task->req_idx); 320 goto out; 321 } 322 323 ctrl_req = spdk_vhost_gpa_to_vva(vdev, desc->addr); 324 325 SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE, 326 "Processing controlq descriptor: desc %d/%p, desc_addr %p, len %d, flags %d, last_used_idx %d; kickfd %d; size %d\n", 327 task->req_idx, desc, (void *)desc->addr, desc->len, desc->flags, task->vq->vring.last_used_idx, 328 task->vq->vring.kickfd, 
task->vq->vring.size); 329 SPDK_TRACEDUMP(SPDK_LOG_VHOST_SCSI_QUEUE, "Request descriptor", (uint8_t *)ctrl_req, 330 desc->len); 331 332 spdk_vhost_scsi_task_init_target(task, ctrl_req->lun); 333 334 spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_size); 335 if (spdk_unlikely(desc == NULL)) { 336 SPDK_ERRLOG("%s: No response descriptor for controlq request %d.\n", 337 vdev->name, task->req_idx); 338 goto out; 339 } 340 341 /* Process the TMF request */ 342 switch (ctrl_req->type) { 343 case VIRTIO_SCSI_T_TMF: 344 task->tmf_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr); 345 if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_ctrl_tmf_resp) || task->tmf_resp == NULL)) { 346 SPDK_ERRLOG("%s: TMF response descriptor at index %d points to invalid guest memory region\n", 347 vdev->name, task->req_idx); 348 goto out; 349 } 350 351 /* Check if we are processing a valid request */ 352 if (task->scsi_dev == NULL) { 353 task->tmf_resp->response = VIRTIO_SCSI_S_BAD_TARGET; 354 break; 355 } 356 357 switch (ctrl_req->subtype) { 358 case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET: 359 /* Handle LUN reset */ 360 SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE, "LUN reset\n"); 361 362 mgmt_task_submit(task, SPDK_SCSI_TASK_FUNC_LUN_RESET); 363 return; 364 default: 365 task->tmf_resp->response = VIRTIO_SCSI_S_ABORTED; 366 /* Unsupported command */ 367 SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE, "Unsupported TMF command %x\n", ctrl_req->subtype); 368 break; 369 } 370 break; 371 case VIRTIO_SCSI_T_AN_QUERY: 372 case VIRTIO_SCSI_T_AN_SUBSCRIBE: { 373 an_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr); 374 if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_ctrl_an_resp) || an_resp == NULL)) { 375 SPDK_WARNLOG("%s: Asynchronous response descriptor points to invalid guest memory region\n", 376 vdev->name); 377 goto out; 378 } 379 380 an_resp->response = VIRTIO_SCSI_S_ABORTED; 381 break; 382 } 383 default: 384 SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE, "Unsupported control command %x\n", 
ctrl_req->type); 385 break; 386 } 387 388 used_len = sizeof(struct virtio_scsi_ctrl_tmf_resp); 389 out: 390 spdk_vhost_vq_used_ring_enqueue(vdev, task->vq, task->req_idx, used_len); 391 spdk_vhost_scsi_task_put(task); 392 } 393 394 /* 395 * Process task's descriptor chain and setup data related fields. 396 * Return 397 * -1 if request is invalid and must be aborted, 398 * 0 if all data are set. 399 */ 400 static int 401 task_data_setup(struct spdk_vhost_scsi_task *task, 402 struct virtio_scsi_cmd_req **req) 403 { 404 struct spdk_vhost_dev *vdev = &task->svdev->vdev; 405 struct vring_desc *desc, *desc_table; 406 struct iovec *iovs = task->iovs; 407 uint16_t iovcnt = 0; 408 uint32_t desc_table_len, len = 0; 409 int rc; 410 411 spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_cpl, spdk_vhost_scsi_task_free_cb); 412 413 rc = spdk_vhost_vq_get_desc(vdev, task->vq, task->req_idx, &desc, &desc_table, &desc_table_len); 414 /* First descriptor must be readable */ 415 if (spdk_unlikely(rc != 0 || spdk_vhost_vring_desc_is_wr(desc) || 416 desc->len < sizeof(struct virtio_scsi_cmd_req))) { 417 SPDK_WARNLOG("%s: invalid first (request) descriptor at index %"PRIu16".\n", 418 vdev->name, task->req_idx); 419 goto invalid_task; 420 } 421 422 *req = spdk_vhost_gpa_to_vva(vdev, desc->addr); 423 if (spdk_unlikely(*req == NULL)) { 424 SPDK_WARNLOG("%s: Request descriptor at index %d points to invalid guest memory region\n", 425 vdev->name, task->req_idx); 426 goto invalid_task; 427 } 428 429 /* Each request must have at least 2 descriptors (e.g. request and response) */ 430 spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len); 431 if (desc == NULL) { 432 SPDK_WARNLOG("%s: Descriptor chain at index %d contains neither payload nor response buffer.\n", 433 vdev->name, task->req_idx); 434 goto invalid_task; 435 } 436 task->scsi.dxfer_dir = spdk_vhost_vring_desc_is_wr(desc) ? 
SPDK_SCSI_DIR_FROM_DEV :
			       SPDK_SCSI_DIR_TO_DEV;
	task->scsi.iovs = iovs;

	if (task->scsi.dxfer_dir == SPDK_SCSI_DIR_FROM_DEV) {
		/*
		 * FROM_DEV (READ): [RD_req][WR_resp][WR_buf0]...[WR_bufN]
		 */
		task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_cmd_resp) || task->resp == NULL)) {
			SPDK_WARNLOG("%s: Response descriptor at index %d points to invalid guest memory region\n",
				     vdev->name, task->req_idx);
			goto invalid_task;
		}
		rc = spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
		if (spdk_unlikely(rc != 0)) {
			SPDK_WARNLOG("%s: invalid descriptor chain at request index %d (descriptor id overflow?).\n",
				     vdev->name, task->req_idx);
			goto invalid_task;
		}

		if (desc == NULL) {
			/*
			 * TEST UNIT READY command and some others might not contain any payload and this is not an error.
			 */
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_DATA,
				      "No payload descriptors for FROM DEV command req_idx=%"PRIu16".\n", task->req_idx);
			SPDK_TRACEDUMP(SPDK_LOG_VHOST_SCSI_DATA, "CDB=", (*req)->cdb, VIRTIO_SCSI_CDB_SIZE);
			task->used_len = sizeof(struct virtio_scsi_cmd_resp);
			task->scsi.iovcnt = 1;
			task->scsi.iovs[0].iov_len = 0;
			task->scsi.length = 0;
			task->scsi.transfer_len = 0;
			return 0;
		}

		/* All remaining descriptors are data.
 */
		while (desc) {
			/* FROM_DEV payload buffers must be guest-writable. */
			if (spdk_unlikely(!spdk_vhost_vring_desc_is_wr(desc))) {
				SPDK_WARNLOG("FROM DEV cmd: descriptor nr %" PRIu16" in payload chain is read only.\n", iovcnt);
				goto invalid_task;
			}

			if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
				goto invalid_task;
			}
			len += desc->len;

			rc = spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
			if (spdk_unlikely(rc != 0)) {
				SPDK_WARNLOG("%s: invalid payload in descriptor chain starting at index %d.\n",
					     vdev->name, task->req_idx);
				goto invalid_task;
			}
		}

		/* used_len counts the response struct plus all readable payload. */
		task->used_len = sizeof(struct virtio_scsi_cmd_resp) + len;
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_DATA, "TO DEV");
		/*
		 * TO_DEV (WRITE):[RD_req][RD_buf0]...[RD_bufN][WR_resp]
		 * No need to check descriptor WR flag as this is done while setting scsi.dxfer_dir.
		 */

		/* Process descriptors up to response. */
		while (!spdk_vhost_vring_desc_is_wr(desc)) {
			if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
				goto invalid_task;
			}
			len += desc->len;

			spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
			if (spdk_unlikely(desc == NULL)) {
				SPDK_WARNLOG("TO_DEV cmd: no response descriptor.\n");
				goto invalid_task;
			}
		}

		task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_cmd_resp) || task->resp == NULL)) {
			SPDK_WARNLOG("%s: Response descriptor at index %d points to invalid guest memory region\n",
				     vdev->name, task->req_idx);
			goto invalid_task;
		}

		/* Only the response struct is written back for TO_DEV. */
		task->used_len = sizeof(struct virtio_scsi_cmd_resp);
	}

	task->scsi.iovcnt = iovcnt;
	task->scsi.length = len;
	task->scsi.transfer_len = len;
	return 0;

invalid_task:
	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_DATA, "%s: Invalid task at index %"PRIu16".\n",
		      vdev->name, task->req_idx);
	return -1;
}

/* Parse and validate one requestq entry.
 * Returns 0 to submit normally, 1 when the task was handled inline
 * (NULL LUN), -1 when the request is invalid and must be aborted.
 */
static int
process_request(struct spdk_vhost_scsi_task *task)
{
	struct virtio_scsi_cmd_req *req;
	int result;

	result = task_data_setup(task, &req);
	if (result) {
		return result;
	}

	result = spdk_vhost_scsi_task_init_target(task, req->lun);
	if (spdk_unlikely(result != 0)) {
		task->resp->response = VIRTIO_SCSI_S_BAD_TARGET;
		return -1;
	}

	task->scsi.cdb = req->cdb;
	SPDK_TRACEDUMP(SPDK_LOG_VHOST_SCSI_DATA, "request CDB", req->cdb, VIRTIO_SCSI_CDB_SIZE);

	if (spdk_unlikely(task->scsi.lun == NULL)) {
		/* Target exists but the LUN is gone (hot-detach); respond with
		 * sense data so the guest learns about the removal.
		 */
		spdk_scsi_task_process_null_lun(&task->scsi);
		task->resp->response = VIRTIO_SCSI_S_OK;
		return 1;
	}

	return 0;
}

/* Drain up to 32 pending control-queue entries and dispatch each one. */
static void
process_controlq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue *vq)
{
	struct spdk_vhost_scsi_task *task;
	uint16_t reqs[32];
	uint16_t reqs_cnt, i;

	reqs_cnt = spdk_vhost_vq_avail_ring_get(vq, reqs, SPDK_COUNTOF(reqs));
	for (i = 0; i < reqs_cnt; i++) {
		if (spdk_unlikely(reqs[i] >= vq->vring.size)) {
			SPDK_ERRLOG("%s: invalid entry in avail ring. Buffer '%"PRIu16"' exceeds virtqueue size (%"PRIu16")\n",
				    svdev->vdev.name, reqs[i], vq->vring.size);
			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
			continue;
		}

		task = &((struct spdk_vhost_scsi_task *)vq->tasks)[reqs[i]];
		if (spdk_unlikely(task->used)) {
			SPDK_ERRLOG("%s: invalid entry in avail ring. 
Buffer '%"PRIu16"' is still in use!\n",
				    svdev->vdev.name, reqs[i]);
			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
			continue;
		}

		svdev->vdev.task_cnt++;
		memset(&task->scsi, 0, sizeof(task->scsi));
		task->tmf_resp = NULL;
		task->used = true;
		process_ctrl_request(task);
	}
}

/* Drain up to 32 pending request-queue entries and submit each one to the
 * SCSI layer (or complete it inline on validation failure / NULL LUN).
 */
static void
process_requestq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue *vq)
{
	struct spdk_vhost_scsi_task *task;
	uint16_t reqs[32];
	uint16_t reqs_cnt, i;
	int result;

	reqs_cnt = spdk_vhost_vq_avail_ring_get(vq, reqs, SPDK_COUNTOF(reqs));
	assert(reqs_cnt <= 32);

	for (i = 0; i < reqs_cnt; i++) {
		SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Starting processing request idx %"PRIu16"======\n",
			      reqs[i]);

		if (spdk_unlikely(reqs[i] >= vq->vring.size)) {
			SPDK_ERRLOG("%s: request idx '%"PRIu16"' exceeds virtqueue size (%"PRIu16").\n",
				    svdev->vdev.name, reqs[i], vq->vring.size);
			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
			continue;
		}

		task = &((struct spdk_vhost_scsi_task *)vq->tasks)[reqs[i]];
		if (spdk_unlikely(task->used)) {
			SPDK_ERRLOG("%s: request with idx '%"PRIu16"' is already pending.\n",
				    svdev->vdev.name, reqs[i]);
			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
			continue;
		}

		svdev->vdev.task_cnt++;
		memset(&task->scsi, 0, sizeof(task->scsi));
		task->resp = NULL;
		task->used = true;
		task->used_len = 0;
		result = process_request(task);
		if (likely(result == 0)) {
			task_submit(task);
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Task %p req_idx %d submitted ======\n", task,
				      task->req_idx);
		} else if (result > 0) {
			/* NULL LUN: sense data is already prepared; complete inline. */
			spdk_vhost_scsi_task_cpl(&task->scsi);
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Task %p req_idx %d finished early ======\n", task,
				      task->req_idx);
		} else {
			invalid_request(task);
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Task %p req_idx %d failed ======\n", task,
				      task->req_idx);
		}
	}
}

/* Management poller (runs every MGMT_POLL_PERIOD_US): finalize hot-detached
 * targets, signal the eventq, and service the control queue.
 */
static int
vdev_mgmt_worker(void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;

	process_removed_devs(svdev);
	spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_EVENTQ]);

	process_controlq(svdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_CONTROLQ]);
	spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_CONTROLQ]);

	return -1;
}

/* I/O poller: service every request queue, then signal the guest once. */
static int
vdev_worker(void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;
	uint32_t q_idx;

	for (q_idx = VIRTIO_SCSI_REQUESTQ; q_idx < svdev->vdev.num_queues; q_idx++) {
		process_requestq(svdev, &svdev->vdev.virtqueue[q_idx]);
	}

	spdk_vhost_dev_used_signal(&svdev->vdev);

	return -1;
}

/* Downcast a generic vhost device to a vhost-scsi device; returns NULL (and
 * logs) if the device is NULL or uses a different backend.
 */
static struct spdk_vhost_scsi_dev *
to_scsi_dev(struct spdk_vhost_dev *ctrlr)
{
	if (ctrlr == NULL) {
		return NULL;
	}

	if (ctrlr->backend != &spdk_vhost_scsi_device_backend) {
		SPDK_ERRLOG("%s: not a vhost-scsi device.\n", ctrlr->name);
		return NULL;
	}

	return SPDK_CONTAINEROF(ctrlr, struct spdk_vhost_scsi_dev, vdev);
}

/* Allocate and register a new vhost-scsi controller.
 * Returns 0 on success, negative errno on failure (allocation freed).
 */
int
spdk_vhost_scsi_dev_construct(const char *name, const char *cpumask)
{
	struct spdk_vhost_scsi_dev *svdev = spdk_dma_zmalloc(sizeof(struct spdk_vhost_scsi_dev),
					    SPDK_CACHE_LINE_SIZE, NULL);
	int rc;

	if (svdev == NULL) {
		return -ENOMEM;
	}

	spdk_vhost_lock();
	rc = spdk_vhost_dev_register(&svdev->vdev, name, cpumask,
				     &spdk_vhost_scsi_device_backend);

	if (rc) {
		spdk_dma_free(svdev);
	}

	spdk_vhost_unlock();
	return rc;
}

/* Backend remove callback: refuse to remove a registered controller that
 * still has targets; otherwise force-remove targets, unregister and free.
 */
static int
spdk_vhost_scsi_dev_remove(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_scsi_dev *svdev = to_scsi_dev(vdev);
	int rc, i;

	if (svdev == NULL) {
		return -EINVAL;
	}

	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; ++i) {
		if (svdev->scsi_dev[i]) {
			/* A still-registered controller must be emptied first. */
			if (vdev->registered) {
				SPDK_ERRLOG("Trying to remove non-empty controller: %s.\n", vdev->name);
				return -EBUSY;
			}

			rc = spdk_vhost_scsi_dev_remove_tgt(vdev, i, NULL, NULL);
			if (rc != 0) {
				SPDK_ERRLOG("%s: failed to force-remove target %d\n", vdev->name, i);
				return rc;
			}
		}
	}

	rc = spdk_vhost_dev_unregister(vdev);
	if (rc != 0) {
		return rc;
	}

	spdk_dma_free(svdev);
	return 0;
}

/* Look up a SCSI target by index; returns NULL when unset or when vdev is
 * not a vhost-scsi device.
 */
struct spdk_scsi_dev *
spdk_vhost_scsi_dev_get_tgt(struct spdk_vhost_dev *vdev, uint8_t num)
{
	struct spdk_vhost_scsi_dev *svdev;

	assert(num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
	svdev = to_scsi_dev(vdev);

	return svdev ? svdev->scsi_dev[num] : NULL;
}

/* LUN hot-remove callback from the SCSI layer: find the owning target and
 * queue it for hot-detach (only one LUN per target is supported, so the
 * whole target goes away).
 */
static void
spdk_vhost_scsi_lun_hotremove(const struct spdk_scsi_lun *lun, void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;
	const struct spdk_scsi_dev *scsi_dev;
	unsigned scsi_dev_num;

	assert(lun != NULL);
	assert(svdev != NULL);
	if (svdev->vdev.lcore != -1 &&
	    !spdk_vhost_dev_has_feature(&svdev->vdev, VIRTIO_SCSI_F_HOTPLUG)) {
		SPDK_WARNLOG("%s: hotremove is not enabled for this controller.\n", svdev->vdev.name);
		return;
	}

	scsi_dev = spdk_scsi_lun_get_dev(lun);
	for (scsi_dev_num = 0; scsi_dev_num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; scsi_dev_num++) {
		if (svdev->scsi_dev[scsi_dev_num] == scsi_dev) {
			break;
		}
	}

	if (scsi_dev_num == SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
		/* The entire device has been already removed.
 */
		return;
	}

	/* remove entire device */
	spdk_vhost_scsi_dev_remove_tgt(&svdev->vdev, scsi_dev_num, NULL, NULL);
}

/* Create a single-LUN SCSI target on this controller from the given bdev.
 * Hot-plug onto a live controller requires VIRTIO_SCSI_F_HOTPLUG.
 * Returns 0 on success, negative errno otherwise.
 */
int
spdk_vhost_scsi_dev_add_tgt(struct spdk_vhost_dev *vdev, unsigned scsi_tgt_num,
			    const char *bdev_name)
{
	struct spdk_vhost_scsi_dev *svdev;
	char target_name[SPDK_SCSI_DEV_MAX_NAME];
	int lun_id_list[1];
	const char *bdev_names_list[1];

	svdev = to_scsi_dev(vdev);
	if (svdev == NULL) {
		return -EINVAL;
	}

	if (scsi_tgt_num >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
		SPDK_ERRLOG("Controller %d target number too big (max %d)\n", scsi_tgt_num,
			    SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
		return -EINVAL;
	}

	if (bdev_name == NULL) {
		SPDK_ERRLOG("No lun name specified\n");
		return -EINVAL;
	}

	if (vdev->lcore != -1 && !spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
		SPDK_ERRLOG("Controller %s is in use and hotplug is not supported\n", vdev->name);
		return -ENOTSUP;
	}

	if (svdev->scsi_dev[scsi_tgt_num] != NULL) {
		SPDK_ERRLOG("Controller %s target %u already occupied\n", vdev->name, scsi_tgt_num);
		return -EEXIST;
	}

	/*
	 * At this stage only one LUN per target
	 */
	snprintf(target_name, sizeof(target_name), "Target %u", scsi_tgt_num);
	lun_id_list[0] = 0;
	bdev_names_list[0] = (char *)bdev_name;

	svdev->scsi_dev_state[scsi_tgt_num].removed = false;
	svdev->scsi_dev[scsi_tgt_num] = spdk_scsi_dev_construct(target_name, bdev_names_list, lun_id_list,
				       1,
				       SPDK_SPC_PROTOCOL_IDENTIFIER_SAS, spdk_vhost_scsi_lun_hotremove, svdev);

	if (svdev->scsi_dev[scsi_tgt_num] == NULL) {
		SPDK_ERRLOG("Couldn't create spdk SCSI target '%s' using bdev '%s' in controller: %s\n",
			    target_name, bdev_name, vdev->name);
		return -EINVAL;
	}
	spdk_scsi_dev_add_port(svdev->scsi_dev[scsi_tgt_num], 0, "vhost");

	if (vdev->lcore != -1) {
		/* Controller is live: set up I/O channels and tell the guest
		 * to rescan the new target.
		 */
		spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[scsi_tgt_num]);
		eventq_enqueue(svdev, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_RESCAN);
	}

	SPDK_NOTICELOG("Controller %s: defined target '%s' using bdev '%s'\n",
		       vdev->name, target_name, bdev_name);
	return 0;
}

/* Remove a SCSI target.  On an idle controller the target is destructed
 * immediately; on a live controller (with hotplug negotiated) it is marked
 * for hot-detach, the guest is notified, and process_removed_devs() finishes
 * the job once all pending tasks drain, invoking cb_fn at that point.
 * Returns 0 on success, negative errno otherwise.
 */
int
spdk_vhost_scsi_dev_remove_tgt(struct spdk_vhost_dev *vdev, unsigned scsi_tgt_num,
			       spdk_vhost_event_fn cb_fn, void *cb_arg)
{
	struct spdk_vhost_scsi_dev *svdev;
	struct spdk_scsi_dev *scsi_dev;
	struct spdk_scsi_dev_vhost_state *scsi_dev_state;
	int rc = 0;

	if (scsi_tgt_num >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
		SPDK_ERRLOG("%s: invalid target number %d\n", vdev->name, scsi_tgt_num);
		return -EINVAL;
	}

	svdev = to_scsi_dev(vdev);
	if (svdev == NULL) {
		return -ENODEV;
	}

	scsi_dev = svdev->scsi_dev[scsi_tgt_num];
	if (scsi_dev == NULL) {
		SPDK_ERRLOG("Controller %s target %u is not occupied\n", vdev->name, scsi_tgt_num);
		return -ENODEV;
	}

	if (svdev->vdev.lcore == -1) {
		/* controller is not in use, remove dev and exit */
		svdev->scsi_dev[scsi_tgt_num] = NULL;
		spdk_scsi_dev_destruct(scsi_dev);
		if (cb_fn) {
			rc = cb_fn(vdev, cb_arg);
		}
		SPDK_NOTICELOG("%s: removed target 'Target %u'\n", vdev->name, scsi_tgt_num);
		return rc;
	}

	if (!spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
		SPDK_WARNLOG("%s: 'Target %u' is in use and hot-detach is not enabled for this controller.\n",
			     svdev->vdev.name, scsi_tgt_num);
		return -ENOTSUP;
	}

	scsi_dev_state = &svdev->scsi_dev_state[scsi_tgt_num];
	if (scsi_dev_state->removed) {
		SPDK_WARNLOG("%s: 'Target %u' has been already marked to hotremove.\n", svdev->vdev.name,
			     scsi_tgt_num);
		return -EBUSY;
	}

	scsi_dev_state->remove_cb = cb_fn;
	scsi_dev_state->remove_ctx = cb_arg;
	scsi_dev_state->removed = true;
	eventq_enqueue(svdev, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_REMOVED);

	SPDK_NOTICELOG("%s: queued 'Target %u' for hot-detach.\n", vdev->name, scsi_tgt_num);
	return 0;
}

/* Build vhost-scsi controllers from [VhostScsiN] config-file sections.
 * Each section defines one controller plus its targets via the 'Target'
 * (or deprecated 'Dev') keyword.  Returns 0 on success, -1 on any error.
 */
int
spdk_vhost_scsi_controller_construct(void)
{
	struct spdk_conf_section *sp = spdk_conf_first_section(NULL);
	struct spdk_vhost_dev *vdev;
	int i, dev_num;
	unsigned ctrlr_num = 0;
	char *bdev_name, *tgt_num_str;
	char *cpumask;
	char *name;
	char *keyword;
	char *dev = NULL, *tgt = NULL;

	while (sp != NULL) {
		if (!spdk_conf_section_match_prefix(sp, "VhostScsi")) {
			sp = spdk_conf_next_section(sp);
			continue;
		}

		if (sscanf(spdk_conf_section_get_name(sp), "VhostScsi%u", &ctrlr_num) != 1) {
			SPDK_ERRLOG("Section '%s' has non-numeric suffix.\n",
				    spdk_conf_section_get_name(sp));
			return -1;
		}

		name = spdk_conf_section_get_val(sp, "Name");
		cpumask = spdk_conf_section_get_val(sp, "Cpumask");

		if (spdk_vhost_scsi_dev_construct(name, cpumask) < 0) {
			return -1;
		}

		vdev = spdk_vhost_dev_find(name);
		assert(vdev);

		dev = spdk_conf_section_get_nval(sp, "Dev", 0);
		tgt = spdk_conf_section_get_nval(sp, "Target", 0);

		if (dev && tgt) {
			SPDK_ERRLOG("Used both 'Dev' and 'Target' keywords in section [VhostScsi%u]\n"
				    "Please use one.\n", ctrlr_num);
			return -1;
		} else if (dev) {
			SPDK_NOTICELOG("'Dev' mnemonic is deprecated, and will be removed shortly.\n"
				       "Please, use 'Target' instead\n");
			keyword = "Dev";
		} else {
			keyword = "Target";
		}

		for (i = 0; ; i++) {

			tgt = spdk_conf_section_get_nval(sp, keyword, i);
			if (tgt == NULL) {
				break;
			}

			tgt_num_str = spdk_conf_section_get_nmval(sp, keyword, i, 0);
			if (tgt_num_str == NULL) {
				SPDK_ERRLOG("%s: Invalid or missing target number\n", name);
				return -1;
			}

			dev_num = (int)strtol(tgt_num_str, NULL, 10);
977 bdev_name = spdk_conf_section_get_nmval(sp, keyword, i, 1); 978 if (bdev_name == NULL) { 979 SPDK_ERRLOG("%s: Invalid or missing bdev name for target %d\n", name, dev_num); 980 return -1; 981 } else if (spdk_conf_section_get_nmval(sp, keyword, i, 2)) { 982 SPDK_ERRLOG("%s: Only one LUN per vhost SCSI device supported\n", name); 983 return -1; 984 } 985 986 if (spdk_vhost_scsi_dev_add_tgt(vdev, dev_num, bdev_name) < 0) { 987 return -1; 988 } 989 } 990 991 sp = spdk_conf_next_section(sp); 992 } 993 994 return 0; 995 } 996 997 static void 998 free_task_pool(struct spdk_vhost_scsi_dev *svdev) 999 { 1000 struct spdk_vhost_virtqueue *vq; 1001 uint16_t i; 1002 1003 for (i = 0; i < svdev->vdev.num_queues; i++) { 1004 vq = &svdev->vdev.virtqueue[i]; 1005 if (vq->tasks == NULL) { 1006 continue; 1007 } 1008 1009 spdk_dma_free(vq->tasks); 1010 vq->tasks = NULL; 1011 } 1012 } 1013 1014 static int 1015 alloc_task_pool(struct spdk_vhost_scsi_dev *svdev) 1016 { 1017 struct spdk_vhost_virtqueue *vq; 1018 struct spdk_vhost_scsi_task *task; 1019 uint32_t task_cnt; 1020 uint16_t i; 1021 uint32_t j; 1022 1023 for (i = 0; i < svdev->vdev.num_queues; i++) { 1024 vq = &svdev->vdev.virtqueue[i]; 1025 task_cnt = vq->vring.size; 1026 if (task_cnt > SPDK_VHOST_MAX_VQ_SIZE) { 1027 /* sanity check */ 1028 SPDK_ERRLOG("Controller %s: virtuque %"PRIu16" is too big. 
(size = %"PRIu32", max = %"PRIu32")\n", 1029 svdev->vdev.name, i, task_cnt, SPDK_VHOST_MAX_VQ_SIZE); 1030 free_task_pool(svdev); 1031 return -1; 1032 } 1033 vq->tasks = spdk_dma_zmalloc(sizeof(struct spdk_vhost_scsi_task) * task_cnt, 1034 SPDK_CACHE_LINE_SIZE, NULL); 1035 if (vq->tasks == NULL) { 1036 SPDK_ERRLOG("Controller %s: failed to allocate %"PRIu32" tasks for virtqueue %"PRIu16"\n", 1037 svdev->vdev.name, task_cnt, i); 1038 free_task_pool(svdev); 1039 return -1; 1040 } 1041 1042 for (j = 0; j < task_cnt; j++) { 1043 task = &((struct spdk_vhost_scsi_task *)vq->tasks)[j]; 1044 task->svdev = svdev; 1045 task->vq = vq; 1046 task->req_idx = j; 1047 } 1048 } 1049 1050 return 0; 1051 } 1052 1053 /* 1054 * A new device is added to a data core. First the device is added to the main linked list 1055 * and then allocated to a specific data core. 1056 */ 1057 static int 1058 spdk_vhost_scsi_start(struct spdk_vhost_dev *vdev, void *event_ctx) 1059 { 1060 struct spdk_vhost_scsi_dev *svdev; 1061 uint32_t i; 1062 int rc; 1063 1064 svdev = to_scsi_dev(vdev); 1065 if (svdev == NULL) { 1066 SPDK_ERRLOG("Trying to start non-scsi controller as a scsi one.\n"); 1067 rc = -1; 1068 goto out; 1069 } 1070 1071 rc = alloc_task_pool(svdev); 1072 if (rc != 0) { 1073 SPDK_ERRLOG("%s: failed to alloc task pool.\n", vdev->name); 1074 goto out; 1075 } 1076 1077 for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) { 1078 if (svdev->scsi_dev[i] == NULL) { 1079 continue; 1080 } 1081 spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[i]); 1082 } 1083 SPDK_NOTICELOG("Started poller for vhost controller %s on lcore %d\n", vdev->name, vdev->lcore); 1084 1085 spdk_vhost_dev_mem_register(vdev); 1086 1087 svdev->requestq_poller = spdk_poller_register(vdev_worker, svdev, 0); 1088 svdev->mgmt_poller = spdk_poller_register(vdev_mgmt_worker, svdev, 1089 MGMT_POLL_PERIOD_US); 1090 out: 1091 spdk_vhost_dev_backend_event_done(event_ctx, rc); 1092 return rc; 1093 } 1094 1095 struct 
spdk_vhost_dev_destroy_ctx { 1096 struct spdk_vhost_scsi_dev *svdev; 1097 struct spdk_poller *poller; 1098 void *event_ctx; 1099 }; 1100 1101 static int 1102 destroy_device_poller_cb(void *arg) 1103 { 1104 struct spdk_vhost_dev_destroy_ctx *ctx = arg; 1105 struct spdk_vhost_scsi_dev *svdev = ctx->svdev; 1106 uint32_t i; 1107 1108 if (svdev->vdev.task_cnt > 0) { 1109 return -1; 1110 } 1111 1112 1113 for (i = 0; i < svdev->vdev.num_queues; i++) { 1114 spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[i]); 1115 } 1116 1117 for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) { 1118 if (svdev->scsi_dev[i] == NULL) { 1119 continue; 1120 } 1121 spdk_scsi_dev_free_io_channels(svdev->scsi_dev[i]); 1122 } 1123 1124 SPDK_NOTICELOG("Stopping poller for vhost controller %s\n", svdev->vdev.name); 1125 spdk_vhost_dev_mem_unregister(&svdev->vdev); 1126 1127 free_task_pool(svdev); 1128 1129 spdk_poller_unregister(&ctx->poller); 1130 spdk_vhost_dev_backend_event_done(ctx->event_ctx, 0); 1131 spdk_dma_free(ctx); 1132 1133 return -1; 1134 } 1135 1136 static int 1137 spdk_vhost_scsi_stop(struct spdk_vhost_dev *vdev, void *event_ctx) 1138 { 1139 struct spdk_vhost_scsi_dev *svdev; 1140 struct spdk_vhost_dev_destroy_ctx *destroy_ctx; 1141 1142 svdev = to_scsi_dev(vdev); 1143 if (svdev == NULL) { 1144 SPDK_ERRLOG("Trying to stop non-scsi controller as a scsi one.\n"); 1145 goto err; 1146 } 1147 1148 destroy_ctx = spdk_dma_zmalloc(sizeof(*destroy_ctx), SPDK_CACHE_LINE_SIZE, NULL); 1149 if (destroy_ctx == NULL) { 1150 SPDK_ERRLOG("Failed to alloc memory for destroying device.\n"); 1151 goto err; 1152 } 1153 1154 destroy_ctx->svdev = svdev; 1155 destroy_ctx->event_ctx = event_ctx; 1156 1157 spdk_poller_unregister(&svdev->requestq_poller); 1158 spdk_poller_unregister(&svdev->mgmt_poller); 1159 destroy_ctx->poller = spdk_poller_register(destroy_device_poller_cb, destroy_ctx, 1160 1000); 1161 1162 return 0; 1163 1164 err: 1165 spdk_vhost_dev_backend_event_done(event_ctx, -1); 1166 
return -1; 1167 } 1168 1169 static void 1170 spdk_vhost_scsi_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w) 1171 { 1172 struct spdk_scsi_dev *sdev; 1173 struct spdk_scsi_lun *lun; 1174 uint32_t dev_idx; 1175 uint32_t lun_idx; 1176 1177 assert(vdev != NULL); 1178 spdk_json_write_name(w, "scsi"); 1179 spdk_json_write_array_begin(w); 1180 for (dev_idx = 0; dev_idx < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; dev_idx++) { 1181 sdev = spdk_vhost_scsi_dev_get_tgt(vdev, dev_idx); 1182 if (!sdev) { 1183 continue; 1184 } 1185 1186 spdk_json_write_object_begin(w); 1187 1188 spdk_json_write_name(w, "scsi_dev_num"); 1189 spdk_json_write_uint32(w, dev_idx); 1190 1191 spdk_json_write_name(w, "id"); 1192 spdk_json_write_int32(w, spdk_scsi_dev_get_id(sdev)); 1193 1194 spdk_json_write_name(w, "target_name"); 1195 spdk_json_write_string(w, spdk_scsi_dev_get_name(sdev)); 1196 1197 spdk_json_write_name(w, "luns"); 1198 spdk_json_write_array_begin(w); 1199 1200 for (lun_idx = 0; lun_idx < SPDK_SCSI_DEV_MAX_LUN; lun_idx++) { 1201 lun = spdk_scsi_dev_get_lun(sdev, lun_idx); 1202 if (!lun) { 1203 continue; 1204 } 1205 1206 spdk_json_write_object_begin(w); 1207 1208 spdk_json_write_name(w, "id"); 1209 spdk_json_write_int32(w, spdk_scsi_lun_get_id(lun)); 1210 1211 spdk_json_write_name(w, "bdev_name"); 1212 spdk_json_write_string(w, spdk_scsi_lun_get_bdev_name(lun)); 1213 1214 spdk_json_write_object_end(w); 1215 } 1216 1217 spdk_json_write_array_end(w); 1218 spdk_json_write_object_end(w); 1219 } 1220 1221 spdk_json_write_array_end(w); 1222 } 1223 1224 SPDK_LOG_REGISTER_COMPONENT("vhost_scsi", SPDK_LOG_VHOST_SCSI) 1225 SPDK_LOG_REGISTER_COMPONENT("vhost_scsi_queue", SPDK_LOG_VHOST_SCSI_QUEUE) 1226 SPDK_LOG_REGISTER_COMPONENT("vhost_scsi_data", SPDK_LOG_VHOST_SCSI_DATA) 1227