/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include <linux/virtio_scsi.h>

#include "spdk/env.h"
#include "spdk/thread.h"
#include "spdk/scsi.h"
#include "spdk/scsi_spec.h"
#include "spdk/conf.h"
#include "spdk/event.h"
#include "spdk/util.h"
#include "spdk/likely.h"

#include "spdk/vhost.h"
#include "vhost_internal.h"

/* Features supported by SPDK VHOST lib. */
#define SPDK_VHOST_SCSI_FEATURES	(SPDK_VHOST_FEATURES | \
					(1ULL << VIRTIO_SCSI_F_INOUT) | \
					(1ULL << VIRTIO_SCSI_F_HOTPLUG) | \
					(1ULL << VIRTIO_SCSI_F_CHANGE) | \
					(1ULL << VIRTIO_SCSI_F_T10_PI))

/* Features that are specified in VIRTIO SCSI but currently not supported:
 * - Live migration not supported yet
 * - T10 PI
 */
#define SPDK_VHOST_SCSI_DISABLED_FEATURES	(SPDK_VHOST_DISABLED_FEATURES | \
						(1ULL << VIRTIO_SCSI_F_T10_PI))

#define MGMT_POLL_PERIOD_US (1000 * 5)

#define VIRTIO_SCSI_CONTROLQ	0
#define VIRTIO_SCSI_EVENTQ	1
#define VIRTIO_SCSI_REQUESTQ	2

struct spdk_scsi_dev_vhost_state {
	bool removed;
	spdk_vhost_event_fn remove_cb;
	void *remove_ctx;
};

struct spdk_vhost_scsi_dev {
	struct spdk_vhost_dev vdev;
	struct spdk_scsi_dev *scsi_dev[SPDK_VHOST_SCSI_CTRLR_MAX_DEVS];
	struct spdk_scsi_dev_vhost_state scsi_dev_state[SPDK_VHOST_SCSI_CTRLR_MAX_DEVS];

	struct spdk_poller *requestq_poller;
	struct spdk_poller *mgmt_poller;
	struct spdk_vhost_dev_destroy_ctx destroy_ctx;
} __rte_cache_aligned;

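/*
 * Per-request context for a single virtio-scsi descriptor chain. One task is
 * preallocated for each entry of each virtqueue (see alloc_task_pool()) and is
 * reused for whatever request the guest places at that ring index.
 */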
struct spdk_vhost_scsi_task {
	struct spdk_scsi_task scsi;
	struct iovec iovs[SPDK_VHOST_IOVS_MAX];

	union {
		struct virtio_scsi_cmd_resp *resp;
		struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	};

	struct spdk_vhost_scsi_dev *svdev;
	struct spdk_scsi_dev *scsi_dev;

	/** Number of bytes that were written. */
	uint32_t used_len;

	int req_idx;

	/* If set, the task is currently used for I/O processing. */
	bool used;

	struct spdk_vhost_virtqueue *vq;
};

static int spdk_vhost_scsi_start(struct spdk_vhost_dev *, void *);
static int spdk_vhost_scsi_stop(struct spdk_vhost_dev *, void *);
static void spdk_vhost_scsi_dump_info_json(struct spdk_vhost_dev *vdev,
		struct spdk_json_write_ctx *w);
static void spdk_vhost_scsi_write_config_json(struct spdk_vhost_dev *vdev,
		struct spdk_json_write_ctx *w);
static int spdk_vhost_scsi_dev_remove(struct spdk_vhost_dev *vdev);

const struct spdk_vhost_dev_backend spdk_vhost_scsi_device_backend = {
	.virtio_features = SPDK_VHOST_SCSI_FEATURES,
	.disabled_features = SPDK_VHOST_SCSI_DISABLED_FEATURES,
	.start_device = spdk_vhost_scsi_start,
	.stop_device = spdk_vhost_scsi_stop,
	.dump_info_json = spdk_vhost_scsi_dump_info_json,
	.write_config_json = spdk_vhost_scsi_write_config_json,
	.remove_device = spdk_vhost_scsi_dev_remove,
};

static void
spdk_vhost_scsi_task_put(struct spdk_vhost_scsi_task *task)
{
	spdk_scsi_task_put(&task->scsi);
}

static void
spdk_vhost_scsi_task_free_cb(struct spdk_scsi_task *scsi_task)
{
	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);

	assert(task->svdev->vdev.task_cnt > 0);
	task->svdev->vdev.task_cnt--;
	task->used = false;
}

static void
process_removed_devs(struct spdk_vhost_scsi_dev *svdev)
{
	struct spdk_scsi_dev *dev;
	struct spdk_scsi_dev_vhost_state *state;
	int i;

	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; ++i) {
		dev = svdev->scsi_dev[i];
		state = &svdev->scsi_dev_state[i];

		if (dev && state->removed && !spdk_scsi_dev_has_pending_tasks(dev)) {
			spdk_scsi_dev_free_io_channels(dev);
			svdev->scsi_dev[i] = NULL;
			spdk_scsi_dev_destruct(dev);
			if (state->remove_cb) {
				state->remove_cb(&svdev->vdev, state->remove_ctx);
				state->remove_cb = NULL;
			}
			SPDK_INFOLOG(SPDK_LOG_VHOST, "%s: hot-detached device 'Dev %u'.\n",
				     svdev->vdev.name, i);
		}
	}
}

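/*
 * Post a single virtio-scsi event (e.g. a transport reset on hotplug or
 * hot-detach) to the guest via the eventq. On error the reserved eventq
 * descriptor, if any, is returned to the used ring with zero length.
 */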
static void
eventq_enqueue(struct spdk_vhost_scsi_dev *svdev, unsigned scsi_dev_num, uint32_t event,
	       uint32_t reason)
{
	struct spdk_vhost_virtqueue *vq;
	struct vring_desc *desc, *desc_table;
	struct virtio_scsi_event *desc_ev;
	uint32_t desc_table_size, req_size = 0;
	uint16_t req;
	int rc;

	assert(scsi_dev_num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
	vq = &svdev->vdev.virtqueue[VIRTIO_SCSI_EVENTQ];

	if (spdk_vhost_vq_avail_ring_get(vq, &req, 1) != 1) {
		SPDK_ERRLOG("Controller %s: Failed to send virtio event (no avail ring entries?).\n",
			    svdev->vdev.name);
		return;
	}

	rc = spdk_vhost_vq_get_desc(&svdev->vdev, vq, req, &desc, &desc_table, &desc_table_size);
	if (rc != 0 || desc->len < sizeof(*desc_ev)) {
		SPDK_ERRLOG("Controller %s: Invalid eventq descriptor at index %"PRIu16".\n",
			    svdev->vdev.name, req);
		goto out;
	}

	desc_ev = spdk_vhost_gpa_to_vva(&svdev->vdev, desc->addr, sizeof(*desc_ev));
	if (desc_ev == NULL) {
		SPDK_ERRLOG("Controller %s: Eventq descriptor at index %"PRIu16" points to unmapped guest memory address %p.\n",
			    svdev->vdev.name, req, (void *)(uintptr_t)desc->addr);
		goto out;
	}

	desc_ev->event = event;
	desc_ev->lun[0] = 1;
	desc_ev->lun[1] = scsi_dev_num;
	/* virtio LUN id 0 can refer either to the entire device
	 * or actual LUN 0 (the only one supported by vhost for now)
	 */
	desc_ev->lun[2] = 0 >> 8;
	desc_ev->lun[3] = 0 & 0xFF;
	/* virtio doesn't specify any strict format for LUN id (bytes 2 and 3);
	 * the current implementation relies on the Linux kernel sources.
	 */
	memset(&desc_ev->lun[4], 0, 4);
	desc_ev->reason = reason;
	req_size = sizeof(*desc_ev);

out:
	spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, req, req_size);
}

static void
submit_completion(struct spdk_vhost_scsi_task *task)
{
	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx,
					task->used_len);
	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "Finished task (%p) req_idx=%d\n", task, task->req_idx);

	spdk_vhost_scsi_task_put(task);
}

static void
spdk_vhost_scsi_task_mgmt_cpl(struct spdk_scsi_task *scsi_task)
{
	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);

	submit_completion(task);
}

static void
spdk_vhost_scsi_task_cpl(struct spdk_scsi_task *scsi_task)
{
	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);

	/* The SCSI task has completed. Do final processing and then post
	 * notification to the virtqueue's "used" ring.
	 */
	task->resp->status = task->scsi.status;

	if (task->scsi.status != SPDK_SCSI_STATUS_GOOD) {
		memcpy(task->resp->sense, task->scsi.sense_data, task->scsi.sense_data_len);
		task->resp->sense_len = task->scsi.sense_data_len;
		SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "Task (%p) req_idx=%d failed - status=%u\n", task, task->req_idx,
			      task->scsi.status);
	}
	assert(task->scsi.transfer_len == task->scsi.length);
	task->resp->resid = task->scsi.length - task->scsi.data_transferred;

	submit_completion(task);
}

static void
task_submit(struct spdk_vhost_scsi_task *task)
{
	task->resp->response = VIRTIO_SCSI_S_OK;
	spdk_scsi_dev_queue_task(task->scsi_dev, &task->scsi);
}

static void
mgmt_task_submit(struct spdk_vhost_scsi_task *task, enum spdk_scsi_task_func func)
{
	task->tmf_resp->response = VIRTIO_SCSI_S_OK;
	spdk_scsi_dev_queue_mgmt_task(task->scsi_dev, &task->scsi, func);
}

static void
invalid_request(struct spdk_vhost_scsi_task *task)
{
	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx,
					task->used_len);
	spdk_vhost_scsi_task_put(task);

	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "Invalid request (status=%" PRIu8")\n",
		      task->resp ? task->resp->response : -1);
}

static int
spdk_vhost_scsi_task_init_target(struct spdk_vhost_scsi_task *task, const __u8 *lun)
{
	struct spdk_scsi_dev *dev;
	uint16_t lun_id = (((uint16_t)lun[2] << 8) | lun[3]) & 0x3FFF;

	SPDK_LOGDUMP(SPDK_LOG_VHOST_SCSI_QUEUE, "LUN", lun, 8);

	/* First byte must be 1 and second is target */
	if (lun[0] != 1 || lun[1] >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
		return -1;
	}

	dev = task->svdev->scsi_dev[lun[1]];
	task->scsi_dev = dev;
	if (dev == NULL || task->svdev->scsi_dev_state[lun[1]].removed) {
		/* If the device has been hot-detached, return 0 to allow sending
		 * an additional hotremove event via sense codes.
		 */
		return task->svdev->scsi_dev_state[lun[1]].removed ? 0 : -1;
	}

	task->scsi.target_port = spdk_scsi_dev_find_port_by_id(task->scsi_dev, 0);
	task->scsi.lun = spdk_scsi_dev_get_lun(dev, lun_id);
	return 0;
}

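/*
 * Handle a single controlq request: task management functions (only LUN reset
 * is passed down to the SCSI layer) and asynchronous notification requests,
 * which are always completed as VIRTIO_SCSI_S_ABORTED.
 */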
static void
process_ctrl_request(struct spdk_vhost_scsi_task *task)
{
	struct spdk_vhost_dev *vdev = &task->svdev->vdev;
	struct vring_desc *desc, *desc_table;
	struct virtio_scsi_ctrl_tmf_req *ctrl_req;
	struct virtio_scsi_ctrl_an_resp *an_resp;
	uint32_t desc_table_size, used_len = 0;
	int rc;

	spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_mgmt_cpl, spdk_vhost_scsi_task_free_cb);
	rc = spdk_vhost_vq_get_desc(vdev, task->vq, task->req_idx, &desc, &desc_table, &desc_table_size);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("%s: Invalid controlq descriptor at index %d.\n",
			    vdev->name, task->req_idx);
		goto out;
	}

	ctrl_req = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*ctrl_req));
	if (ctrl_req == NULL) {
		SPDK_ERRLOG("%s: Invalid task management request at index %d.\n",
			    vdev->name, task->req_idx);
		goto out;
	}

	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE,
		      "Processing controlq descriptor: desc %d/%p, desc_addr %p, len %d, flags %d, last_used_idx %d; kickfd %d; size %d\n",
		      task->req_idx, desc, (void *)desc->addr, desc->len, desc->flags, task->vq->vring.last_used_idx,
		      task->vq->vring.kickfd, task->vq->vring.size);
	SPDK_LOGDUMP(SPDK_LOG_VHOST_SCSI_QUEUE, "Request descriptor", (uint8_t *)ctrl_req, desc->len);

	spdk_vhost_scsi_task_init_target(task, ctrl_req->lun);

	spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_size);
	if (spdk_unlikely(desc == NULL)) {
		SPDK_ERRLOG("%s: No response descriptor for controlq request %d.\n",
			    vdev->name, task->req_idx);
		goto out;
	}

	/* Process the TMF request */
	switch (ctrl_req->type) {
	case VIRTIO_SCSI_T_TMF:
		task->tmf_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*task->tmf_resp));
		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_ctrl_tmf_resp) || task->tmf_resp == NULL)) {
			SPDK_ERRLOG("%s: TMF response descriptor at index %d points to invalid guest memory region\n",
				    vdev->name, task->req_idx);
			goto out;
		}

		/* Check if we are processing a valid request */
		if (task->scsi_dev == NULL) {
			task->tmf_resp->response = VIRTIO_SCSI_S_BAD_TARGET;
			break;
		}

		switch (ctrl_req->subtype) {
		case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
			/* Handle LUN reset */
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE, "LUN reset\n");

			mgmt_task_submit(task, SPDK_SCSI_TASK_FUNC_LUN_RESET);
			return;
		default:
			task->tmf_resp->response = VIRTIO_SCSI_S_ABORTED;
			/* Unsupported command */
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE, "Unsupported TMF command %x\n", ctrl_req->subtype);
			break;
		}
		break;
	case VIRTIO_SCSI_T_AN_QUERY:
	case VIRTIO_SCSI_T_AN_SUBSCRIBE: {
		an_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*an_resp));
		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_ctrl_an_resp) || an_resp == NULL)) {
			SPDK_WARNLOG("%s: Asynchronous response descriptor points to invalid guest memory region\n",
				     vdev->name);
			goto out;
		}

		an_resp->response = VIRTIO_SCSI_S_ABORTED;
		break;
	}
	default:
		SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE, "Unsupported control command %x\n", ctrl_req->type);
		break;
	}

	used_len = sizeof(struct virtio_scsi_ctrl_tmf_resp);
out:
	spdk_vhost_vq_used_ring_enqueue(vdev, task->vq, task->req_idx, used_len);
	spdk_vhost_scsi_task_put(task);
}

/*
 * Process task's descriptor chain and setup data related fields.
 * Return
 *   -1 if request is invalid and must be aborted,
 *    0 if all data are set.
 */
static int
task_data_setup(struct spdk_vhost_scsi_task *task,
		struct virtio_scsi_cmd_req **req)
{
	struct spdk_vhost_dev *vdev = &task->svdev->vdev;
	struct vring_desc *desc, *desc_table;
	struct iovec *iovs = task->iovs;
	uint16_t iovcnt = 0;
	uint32_t desc_table_len, len = 0;
	int rc;

	spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_cpl, spdk_vhost_scsi_task_free_cb);

	rc = spdk_vhost_vq_get_desc(vdev, task->vq, task->req_idx, &desc, &desc_table, &desc_table_len);
	/* First descriptor must be readable */
	if (spdk_unlikely(rc != 0 || spdk_vhost_vring_desc_is_wr(desc) ||
			  desc->len < sizeof(struct virtio_scsi_cmd_req))) {
		SPDK_WARNLOG("%s: invalid first (request) descriptor at index %"PRIu16".\n",
			     vdev->name, task->req_idx);
		goto invalid_task;
	}

	*req = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(**req));
	if (spdk_unlikely(*req == NULL)) {
		SPDK_WARNLOG("%s: Request descriptor at index %d points to invalid guest memory region\n",
			     vdev->name, task->req_idx);
		goto invalid_task;
	}

	/* Each request must have at least 2 descriptors (i.e. request and response) */
	spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
	if (desc == NULL) {
		SPDK_WARNLOG("%s: Descriptor chain at index %d contains neither payload nor response buffer.\n",
			     vdev->name, task->req_idx);
		goto invalid_task;
	}
	task->scsi.dxfer_dir = spdk_vhost_vring_desc_is_wr(desc) ? SPDK_SCSI_DIR_FROM_DEV :
			       SPDK_SCSI_DIR_TO_DEV;
	task->scsi.iovs = iovs;

	if (task->scsi.dxfer_dir == SPDK_SCSI_DIR_FROM_DEV) {
		/*
		 * FROM_DEV (READ): [RD_req][WR_resp][WR_buf0]...[WR_bufN]
		 */
		task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*task->resp));
		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_cmd_resp) || task->resp == NULL)) {
			SPDK_WARNLOG("%s: Response descriptor at index %d points to invalid guest memory region\n",
				     vdev->name, task->req_idx);
			goto invalid_task;
		}
		rc = spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
		if (spdk_unlikely(rc != 0)) {
			SPDK_WARNLOG("%s: invalid descriptor chain at request index %d (descriptor id overflow?).\n",
				     vdev->name, task->req_idx);
			goto invalid_task;
		}

		if (desc == NULL) {
			/*
			 * TEST UNIT READY command and some others might not contain any payload and this is not an error.
			 */
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_DATA,
				      "No payload descriptors for FROM DEV command req_idx=%"PRIu16".\n", task->req_idx);
			SPDK_LOGDUMP(SPDK_LOG_VHOST_SCSI_DATA, "CDB=", (*req)->cdb, VIRTIO_SCSI_CDB_SIZE);
			task->used_len = sizeof(struct virtio_scsi_cmd_resp);
			task->scsi.iovcnt = 1;
			task->scsi.iovs[0].iov_len = 0;
			task->scsi.length = 0;
			task->scsi.transfer_len = 0;
			return 0;
		}

		/* All remaining descriptors are data. */
		while (desc) {
			if (spdk_unlikely(!spdk_vhost_vring_desc_is_wr(desc))) {
				SPDK_WARNLOG("FROM DEV cmd: descriptor nr %" PRIu16" in payload chain is read only.\n", iovcnt);
				goto invalid_task;
			}

			if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
				goto invalid_task;
			}
			len += desc->len;

			rc = spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
			if (spdk_unlikely(rc != 0)) {
				SPDK_WARNLOG("%s: invalid payload in descriptor chain starting at index %d.\n",
					     vdev->name, task->req_idx);
				goto invalid_task;
			}
		}

		task->used_len = sizeof(struct virtio_scsi_cmd_resp) + len;
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_DATA, "TO DEV");
		/*
		 * TO_DEV (WRITE): [RD_req][RD_buf0]...[RD_bufN][WR_resp]
		 * No need to check descriptor WR flag as this is done while setting scsi.dxfer_dir.
		 */

		/* Process descriptors up to response. */
		while (!spdk_vhost_vring_desc_is_wr(desc)) {
			if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
				goto invalid_task;
			}
			len += desc->len;

			spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
			if (spdk_unlikely(desc == NULL)) {
				SPDK_WARNLOG("TO_DEV cmd: no response descriptor.\n");
				goto invalid_task;
			}
		}

		task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*task->resp));
		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_cmd_resp) || task->resp == NULL)) {
			SPDK_WARNLOG("%s: Response descriptor at index %d points to invalid guest memory region\n",
				     vdev->name, task->req_idx);
			goto invalid_task;
		}

		task->used_len = sizeof(struct virtio_scsi_cmd_resp);
	}

	task->scsi.iovcnt = iovcnt;
	task->scsi.length = len;
	task->scsi.transfer_len = len;
	return 0;

invalid_task:
	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_DATA, "%s: Invalid task at index %"PRIu16".\n",
		      vdev->name, task->req_idx);
	return -1;
}

static int
process_request(struct spdk_vhost_scsi_task *task)
{
	struct virtio_scsi_cmd_req *req;
	int result;

	result = task_data_setup(task, &req);
	if (result) {
		return result;
	}

	result = spdk_vhost_scsi_task_init_target(task, req->lun);
	if (spdk_unlikely(result != 0)) {
		task->resp->response = VIRTIO_SCSI_S_BAD_TARGET;
		return -1;
	}

	task->scsi.cdb = req->cdb;
	SPDK_LOGDUMP(SPDK_LOG_VHOST_SCSI_DATA, "request CDB", req->cdb, VIRTIO_SCSI_CDB_SIZE);

	if (spdk_unlikely(task->scsi.lun == NULL)) {
		spdk_scsi_task_process_null_lun(&task->scsi);
		task->resp->response = VIRTIO_SCSI_S_OK;
		return 1;
	}

	return 0;
}

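/*
 * Drain new entries from the controlq avail ring, validate them, and hand
 * each one over to process_ctrl_request().
 */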
static void
process_controlq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue *vq)
{
	struct spdk_vhost_scsi_task *task;
	uint16_t reqs[32];
	uint16_t reqs_cnt, i;

	reqs_cnt = spdk_vhost_vq_avail_ring_get(vq, reqs, SPDK_COUNTOF(reqs));
	for (i = 0; i < reqs_cnt; i++) {
		if (spdk_unlikely(reqs[i] >= vq->vring.size)) {
			SPDK_ERRLOG("%s: invalid entry in avail ring. Buffer '%"PRIu16"' exceeds virtqueue size (%"PRIu16")\n",
				    svdev->vdev.name, reqs[i], vq->vring.size);
			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
			continue;
		}

		task = &((struct spdk_vhost_scsi_task *)vq->tasks)[reqs[i]];
		if (spdk_unlikely(task->used)) {
			SPDK_ERRLOG("%s: invalid entry in avail ring. Buffer '%"PRIu16"' is still in use!\n",
				    svdev->vdev.name, reqs[i]);
			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
			continue;
		}

		svdev->vdev.task_cnt++;
		memset(&task->scsi, 0, sizeof(task->scsi));
		task->tmf_resp = NULL;
		task->used = true;
		process_ctrl_request(task);
	}
}

static void
process_requestq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue *vq)
{
	struct spdk_vhost_scsi_task *task;
	uint16_t reqs[32];
	uint16_t reqs_cnt, i;
	int result;

	reqs_cnt = spdk_vhost_vq_avail_ring_get(vq, reqs, SPDK_COUNTOF(reqs));
	assert(reqs_cnt <= 32);

	for (i = 0; i < reqs_cnt; i++) {
		SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Starting processing request idx %"PRIu16"======\n",
			      reqs[i]);

		if (spdk_unlikely(reqs[i] >= vq->vring.size)) {
			SPDK_ERRLOG("%s: request idx '%"PRIu16"' exceeds virtqueue size (%"PRIu16").\n",
				    svdev->vdev.name, reqs[i], vq->vring.size);
			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
			continue;
		}

		task = &((struct spdk_vhost_scsi_task *)vq->tasks)[reqs[i]];
		if (spdk_unlikely(task->used)) {
			SPDK_ERRLOG("%s: request with idx '%"PRIu16"' is already pending.\n",
				    svdev->vdev.name, reqs[i]);
			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
			continue;
		}

		svdev->vdev.task_cnt++;
		memset(&task->scsi, 0, sizeof(task->scsi));
		task->resp = NULL;
		task->used = true;
		task->used_len = 0;
		result = process_request(task);
		if (likely(result == 0)) {
			task_submit(task);
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Task %p req_idx %d submitted ======\n", task,
				      task->req_idx);
		} else if (result > 0) {
			spdk_vhost_scsi_task_cpl(&task->scsi);
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Task %p req_idx %d finished early ======\n", task,
				      task->req_idx);
		} else {
			invalid_request(task);
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Task %p req_idx %d failed ======\n", task,
				      task->req_idx);
		}
	}
}

static int
vdev_mgmt_worker(void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;

	process_removed_devs(svdev);
	spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_EVENTQ]);

	process_controlq(svdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_CONTROLQ]);
	spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_CONTROLQ]);

	return -1;
}

static int
vdev_worker(void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;
	uint32_t q_idx;

	for (q_idx = VIRTIO_SCSI_REQUESTQ; q_idx < svdev->vdev.max_queues; q_idx++) {
		process_requestq(svdev, &svdev->vdev.virtqueue[q_idx]);
	}

	spdk_vhost_dev_used_signal(&svdev->vdev);

	return -1;
}

static struct spdk_vhost_scsi_dev *
to_scsi_dev(struct spdk_vhost_dev *ctrlr)
{
	if (ctrlr == NULL) {
		return NULL;
	}

	if (ctrlr->backend != &spdk_vhost_scsi_device_backend) {
		SPDK_ERRLOG("%s: not a vhost-scsi device.\n", ctrlr->name);
		return NULL;
	}

	return SPDK_CONTAINEROF(ctrlr, struct spdk_vhost_scsi_dev, vdev);
}

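/*
 * Allocate a vhost-scsi controller and register it with the generic vhost
 * layer under the given name and CPU mask.
 */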
int
spdk_vhost_scsi_dev_construct(const char *name, const char *cpumask)
{
	struct spdk_vhost_scsi_dev *svdev = spdk_dma_zmalloc(sizeof(struct spdk_vhost_scsi_dev),
					    SPDK_CACHE_LINE_SIZE, NULL);
	int rc;

	if (svdev == NULL) {
		return -ENOMEM;
	}

	spdk_vhost_lock();
	rc = spdk_vhost_dev_register(&svdev->vdev, name, cpumask,
				     &spdk_vhost_scsi_device_backend);

	if (rc) {
		spdk_dma_free(svdev);
	}

	spdk_vhost_unlock();
	return rc;
}

static int
spdk_vhost_scsi_dev_remove(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_scsi_dev *svdev = to_scsi_dev(vdev);
	int rc, i;

	if (svdev == NULL) {
		return -EINVAL;
	}

	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; ++i) {
		if (svdev->scsi_dev[i]) {
			if (vdev->registered) {
				SPDK_ERRLOG("Trying to remove non-empty controller: %s.\n", vdev->name);
				return -EBUSY;
			}

			rc = spdk_vhost_scsi_dev_remove_tgt(vdev, i, NULL, NULL);
			if (rc != 0) {
				SPDK_ERRLOG("%s: failed to force-remove target %d\n", vdev->name, i);
				return rc;
			}
		}
	}

	rc = spdk_vhost_dev_unregister(vdev);
	if (rc != 0) {
		return rc;
	}

	spdk_dma_free(svdev);
	return 0;
}

struct spdk_scsi_dev *
spdk_vhost_scsi_dev_get_tgt(struct spdk_vhost_dev *vdev, uint8_t num)
{
	struct spdk_vhost_scsi_dev *svdev;

	assert(num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
	svdev = to_scsi_dev(vdev);

	return svdev ? svdev->scsi_dev[num] : NULL;
}

static void
spdk_vhost_scsi_lun_hotremove(const struct spdk_scsi_lun *lun, void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;
	const struct spdk_scsi_dev *scsi_dev;
	unsigned scsi_dev_num;

	assert(lun != NULL);
	assert(svdev != NULL);
	if (svdev->vdev.lcore != -1 &&
	    !spdk_vhost_dev_has_feature(&svdev->vdev, VIRTIO_SCSI_F_HOTPLUG)) {
		SPDK_WARNLOG("%s: hotremove is not enabled for this controller.\n", svdev->vdev.name);
		return;
	}

	scsi_dev = spdk_scsi_lun_get_dev(lun);
	for (scsi_dev_num = 0; scsi_dev_num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; scsi_dev_num++) {
		if (svdev->scsi_dev[scsi_dev_num] == scsi_dev) {
			break;
		}
	}

	if (scsi_dev_num == SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
		/* The entire device has already been removed. */
		return;
	}

	/* remove entire device */
	spdk_vhost_scsi_dev_remove_tgt(&svdev->vdev, scsi_dev_num, NULL, NULL);
}

int
spdk_vhost_scsi_dev_add_tgt(struct spdk_vhost_dev *vdev, unsigned scsi_tgt_num,
			    const char *bdev_name)
{
	struct spdk_vhost_scsi_dev *svdev;
	char target_name[SPDK_SCSI_DEV_MAX_NAME];
	int lun_id_list[1];
	const char *bdev_names_list[1];

	svdev = to_scsi_dev(vdev);
	if (svdev == NULL) {
		return -EINVAL;
	}

	if (scsi_tgt_num >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
		SPDK_ERRLOG("Controller %u target number too big (max %d)\n", scsi_tgt_num,
			    SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
		return -EINVAL;
	}

	if (bdev_name == NULL) {
		SPDK_ERRLOG("No lun name specified\n");
		return -EINVAL;
	}

	if (svdev->scsi_dev[scsi_tgt_num] != NULL) {
		SPDK_ERRLOG("Controller %s target %u already occupied\n", vdev->name, scsi_tgt_num);
		return -EEXIST;
	}

	/*
	 * At this stage only one LUN per target
	 */
	snprintf(target_name, sizeof(target_name), "Target %u", scsi_tgt_num);
	lun_id_list[0] = 0;
	bdev_names_list[0] = (char *)bdev_name;

	svdev->scsi_dev_state[scsi_tgt_num].removed = false;
	svdev->scsi_dev[scsi_tgt_num] = spdk_scsi_dev_construct(target_name, bdev_names_list, lun_id_list,
					1,
					SPDK_SPC_PROTOCOL_IDENTIFIER_SAS, spdk_vhost_scsi_lun_hotremove, svdev);

	if (svdev->scsi_dev[scsi_tgt_num] == NULL) {
		SPDK_ERRLOG("Couldn't create spdk SCSI target '%s' using bdev '%s' in controller: %s\n",
			    target_name, bdev_name, vdev->name);
		return -EINVAL;
	}
	spdk_scsi_dev_add_port(svdev->scsi_dev[scsi_tgt_num], 0, "vhost");

	SPDK_INFOLOG(SPDK_LOG_VHOST, "Controller %s: defined target '%s' using bdev '%s'\n",
		     vdev->name, target_name, bdev_name);

	if (vdev->lcore == -1) {
		/* All done. */
		return 0;
	}

	spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[scsi_tgt_num]);

	if (spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
		eventq_enqueue(svdev, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET,
			       VIRTIO_SCSI_EVT_RESET_RESCAN);
	} else {
		SPDK_NOTICELOG("Device %s does not support hotplug. "
			       "Please restart the driver or perform a rescan.\n",
			       vdev->name);
	}

	return 0;
}

int
spdk_vhost_scsi_dev_remove_tgt(struct spdk_vhost_dev *vdev, unsigned scsi_tgt_num,
			       spdk_vhost_event_fn cb_fn, void *cb_arg)
{
	struct spdk_vhost_scsi_dev *svdev;
	struct spdk_scsi_dev *scsi_dev;
	struct spdk_scsi_dev_vhost_state *scsi_dev_state;
	int rc = 0;

	if (scsi_tgt_num >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
		SPDK_ERRLOG("%s: invalid target number %u\n", vdev->name, scsi_tgt_num);
		return -EINVAL;
	}

	svdev = to_scsi_dev(vdev);
	if (svdev == NULL) {
		return -ENODEV;
	}

	scsi_dev = svdev->scsi_dev[scsi_tgt_num];
	if (scsi_dev == NULL) {
		SPDK_ERRLOG("Controller %s target %u is not occupied\n", vdev->name, scsi_tgt_num);
		return -ENODEV;
	}

	if (svdev->vdev.lcore == -1) {
		/* controller is not in use, remove dev and exit */
		svdev->scsi_dev[scsi_tgt_num] = NULL;
		spdk_scsi_dev_destruct(scsi_dev);
		if (cb_fn) {
			rc = cb_fn(vdev, cb_arg);
		}
		SPDK_INFOLOG(SPDK_LOG_VHOST, "%s: removed target 'Target %u'\n",
			     vdev->name, scsi_tgt_num);
		return rc;
	}

	if (!spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
		SPDK_WARNLOG("%s: 'Target %u' is in use and hot-detach is not enabled for this controller.\n",
			     svdev->vdev.name, scsi_tgt_num);
		return -ENOTSUP;
	}

	scsi_dev_state = &svdev->scsi_dev_state[scsi_tgt_num];
	if (scsi_dev_state->removed) {
		SPDK_WARNLOG("%s: 'Target %u' has already been marked for hot removal.\n", svdev->vdev.name,
			     scsi_tgt_num);
		return -EBUSY;
	}

	scsi_dev_state->remove_cb = cb_fn;
	scsi_dev_state->remove_ctx = cb_arg;
	scsi_dev_state->removed = true;
	eventq_enqueue(svdev, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_REMOVED);

	SPDK_INFOLOG(SPDK_LOG_VHOST, "%s: queued 'Target %u' for hot-detach.\n", vdev->name, scsi_tgt_num);
	return 0;
}

int
spdk_vhost_scsi_controller_construct(void)
{
	struct spdk_conf_section *sp = spdk_conf_first_section(NULL);
	struct spdk_vhost_dev *vdev;
	int i, dev_num;
	unsigned ctrlr_num = 0;
	char *bdev_name, *tgt_num_str;
	char *cpumask;
	char *name;
	char *tgt = NULL;

	while (sp != NULL) {
		if (!spdk_conf_section_match_prefix(sp, "VhostScsi")) {
			sp = spdk_conf_next_section(sp);
			continue;
		}

		if (sscanf(spdk_conf_section_get_name(sp), "VhostScsi%u", &ctrlr_num) != 1) {
			SPDK_ERRLOG("Section '%s' has non-numeric suffix.\n",
				    spdk_conf_section_get_name(sp));
			return -1;
		}

		name = spdk_conf_section_get_val(sp, "Name");
		cpumask = spdk_conf_section_get_val(sp, "Cpumask");

		if (spdk_vhost_scsi_dev_construct(name, cpumask) < 0) {
			return -1;
		}

		vdev = spdk_vhost_dev_find(name);
		assert(vdev);

		for (i = 0; ; i++) {

			tgt = spdk_conf_section_get_nval(sp, "Target", i);
			if (tgt == NULL) {
				break;
			}

			tgt_num_str = spdk_conf_section_get_nmval(sp, "Target", i, 0);
			if (tgt_num_str == NULL) {
				SPDK_ERRLOG("%s: Invalid or missing target number\n", name);
				return -1;
			}

			dev_num = (int)strtol(tgt_num_str, NULL, 10);
			bdev_name = spdk_conf_section_get_nmval(sp, "Target", i, 1);
			if (bdev_name == NULL) {
				SPDK_ERRLOG("%s: Invalid or missing bdev name for target %d\n", name, dev_num);
				return -1;
			} else if (spdk_conf_section_get_nmval(sp, "Target", i, 2)) {
				SPDK_ERRLOG("%s: Only one LUN per vhost SCSI device is supported\n", name);
				return -1;
			}

			if (spdk_vhost_scsi_dev_add_tgt(vdev, dev_num, bdev_name) < 0) {
				return -1;
			}
		}

		sp = spdk_conf_next_section(sp);
	}

	return 0;
}

static void
free_task_pool(struct spdk_vhost_scsi_dev *svdev)
{
	struct spdk_vhost_virtqueue *vq;
	uint16_t i;

	for (i = 0; i < svdev->vdev.max_queues; i++) {
		vq = &svdev->vdev.virtqueue[i];
		if (vq->tasks == NULL) {
			continue;
		}

		spdk_dma_free(vq->tasks);
		vq->tasks = NULL;
	}
}

static int
alloc_task_pool(struct spdk_vhost_scsi_dev *svdev)
{
	struct spdk_vhost_virtqueue *vq;
	struct spdk_vhost_scsi_task *task;
	uint32_t task_cnt;
	uint16_t i;
	uint32_t j;

	for (i = 0; i < svdev->vdev.max_queues; i++) {
		vq = &svdev->vdev.virtqueue[i];
		if (vq->vring.desc == NULL) {
			continue;
		}

		task_cnt = vq->vring.size;
		if (task_cnt > SPDK_VHOST_MAX_VQ_SIZE) {
			/* sanity check */
			SPDK_ERRLOG("Controller %s: virtqueue %"PRIu16" is too big. (size = %"PRIu32", max = %"PRIu32")\n",
				    svdev->vdev.name, i, task_cnt, SPDK_VHOST_MAX_VQ_SIZE);
			free_task_pool(svdev);
			return -1;
		}
		vq->tasks = spdk_dma_zmalloc(sizeof(struct spdk_vhost_scsi_task) * task_cnt,
					     SPDK_CACHE_LINE_SIZE, NULL);
		if (vq->tasks == NULL) {
			SPDK_ERRLOG("Controller %s: failed to allocate %"PRIu32" tasks for virtqueue %"PRIu16"\n",
				    svdev->vdev.name, task_cnt, i);
			free_task_pool(svdev);
			return -1;
		}

		for (j = 0; j < task_cnt; j++) {
			task = &((struct spdk_vhost_scsi_task *)vq->tasks)[j];
			task->svdev = svdev;
			task->vq = vq;
			task->req_idx = j;
		}
	}

	return 0;
}

/*
 * A new device is added to a data core. First the device is added to the main linked list
 * and then allocated to a specific data core.
 */
static int
spdk_vhost_scsi_start(struct spdk_vhost_dev *vdev, void *event_ctx)
{
	struct spdk_vhost_scsi_dev *svdev;
	uint32_t i;
	int rc;

	svdev = to_scsi_dev(vdev);
	if (svdev == NULL) {
		SPDK_ERRLOG("Trying to start non-scsi controller as a scsi one.\n");
		rc = -1;
		goto out;
	}

	/* validate all I/O queues are in a contiguous index range */
	for (i = VIRTIO_SCSI_REQUESTQ; i < vdev->max_queues; i++) {
		if (vdev->virtqueue[i].vring.desc == NULL) {
			SPDK_ERRLOG("%s: queue %"PRIu32" is empty\n", vdev->name, i);
			rc = -1;
			goto out;
		}
	}

	rc = alloc_task_pool(svdev);
	if (rc != 0) {
		SPDK_ERRLOG("%s: failed to alloc task pool.\n", vdev->name);
		goto out;
	}

	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) {
		if (svdev->scsi_dev[i] == NULL) {
			continue;
		}
		spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[i]);
	}
	SPDK_INFOLOG(SPDK_LOG_VHOST, "Started poller for vhost controller %s on lcore %d\n",
		     vdev->name, vdev->lcore);

	svdev->requestq_poller = spdk_poller_register(vdev_worker, svdev, 0);
	if (vdev->virtqueue[VIRTIO_SCSI_CONTROLQ].vring.desc &&
	    vdev->virtqueue[VIRTIO_SCSI_EVENTQ].vring.desc) {
		svdev->mgmt_poller = spdk_poller_register(vdev_mgmt_worker, svdev,
				     MGMT_POLL_PERIOD_US);
	}
out:
	spdk_vhost_dev_backend_event_done(event_ctx, rc);
	return rc;
}

static int
destroy_device_poller_cb(void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;
	uint32_t i;

	if (svdev->vdev.task_cnt > 0) {
		return -1;
	}

	for (i = 0; i < svdev->vdev.max_queues; i++) {
		spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[i]);
	}

	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) {
		if (svdev->scsi_dev[i] == NULL) {
			continue;
		}
		spdk_scsi_dev_free_io_channels(svdev->scsi_dev[i]);
	}

	SPDK_INFOLOG(SPDK_LOG_VHOST, "Stopping poller for vhost controller %s\n", svdev->vdev.name);

	free_task_pool(svdev);

	spdk_poller_unregister(&svdev->destroy_ctx.poller);
	spdk_vhost_dev_backend_event_done(svdev->destroy_ctx.event_ctx, 0);

	return -1;
}

static int
spdk_vhost_scsi_stop(struct spdk_vhost_dev *vdev, void *event_ctx)
{
	struct spdk_vhost_scsi_dev *svdev;

	svdev = to_scsi_dev(vdev);
	if (svdev == NULL) {
		SPDK_ERRLOG("Trying to stop non-scsi controller as a scsi one.\n");
		goto err;
	}

	svdev->destroy_ctx.event_ctx = event_ctx;
	spdk_poller_unregister(&svdev->requestq_poller);
	spdk_poller_unregister(&svdev->mgmt_poller);
	svdev->destroy_ctx.poller = spdk_poller_register(destroy_device_poller_cb, svdev,
				    1000);

	return 0;

err:
	spdk_vhost_dev_backend_event_done(event_ctx, -1);
	return -1;
}

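/*
 * Dump this controller's SCSI targets and their LUNs for the framework's
 * informational JSON output.
 */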
static void
spdk_vhost_scsi_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
{
	struct spdk_scsi_dev *sdev;
	struct spdk_scsi_lun *lun;
	uint32_t dev_idx;
	uint32_t lun_idx;

	assert(vdev != NULL);
	spdk_json_write_name(w, "scsi");
	spdk_json_write_array_begin(w);
	for (dev_idx = 0; dev_idx < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; dev_idx++) {
		sdev = spdk_vhost_scsi_dev_get_tgt(vdev, dev_idx);
		if (!sdev) {
			continue;
		}

		spdk_json_write_object_begin(w);

		spdk_json_write_name(w, "scsi_dev_num");
		spdk_json_write_uint32(w, dev_idx);

		spdk_json_write_name(w, "id");
		spdk_json_write_int32(w, spdk_scsi_dev_get_id(sdev));

		spdk_json_write_name(w, "target_name");
		spdk_json_write_string(w, spdk_scsi_dev_get_name(sdev));

		spdk_json_write_name(w, "luns");
		spdk_json_write_array_begin(w);

		for (lun_idx = 0; lun_idx < SPDK_SCSI_DEV_MAX_LUN; lun_idx++) {
			lun = spdk_scsi_dev_get_lun(sdev, lun_idx);
			if (!lun) {
				continue;
			}

			spdk_json_write_object_begin(w);

			spdk_json_write_name(w, "id");
			spdk_json_write_int32(w, spdk_scsi_lun_get_id(lun));

			spdk_json_write_name(w, "bdev_name");
			spdk_json_write_string(w, spdk_scsi_lun_get_bdev_name(lun));

			spdk_json_write_object_end(w);
		}

		spdk_json_write_array_end(w);
		spdk_json_write_object_end(w);
	}

	spdk_json_write_array_end(w);
}

static void
spdk_vhost_scsi_write_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
{
	struct spdk_vhost_scsi_dev *svdev;
	struct spdk_scsi_lun *lun;
	uint32_t i;

	svdev = to_scsi_dev(vdev);
	if (!svdev) {
		return;
	}

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "construct_vhost_scsi_controller");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "ctrlr", vdev->name);
	spdk_json_write_named_string(w, "cpumask", spdk_cpuset_fmt(vdev->cpumask));
	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);

	for (i = 0; i < SPDK_COUNTOF(svdev->scsi_dev); i++) {
		if (svdev->scsi_dev[i] == NULL || svdev->scsi_dev_state[i].removed) {
			continue;
		}

		lun = spdk_scsi_dev_get_lun(svdev->scsi_dev[i], 0);

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "add_vhost_scsi_lun");

		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "ctrlr", vdev->name);
		spdk_json_write_named_uint32(w, "scsi_target_num", i);

		spdk_json_write_named_string(w, "bdev_name", spdk_scsi_lun_get_bdev_name(lun));
		spdk_json_write_object_end(w);

		spdk_json_write_object_end(w);
	}
}

SPDK_LOG_REGISTER_COMPONENT("vhost_scsi", SPDK_LOG_VHOST_SCSI)
SPDK_LOG_REGISTER_COMPONENT("vhost_scsi_queue", SPDK_LOG_VHOST_SCSI_QUEUE)
SPDK_LOG_REGISTER_COMPONENT("vhost_scsi_data", SPDK_LOG_VHOST_SCSI_DATA)