1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "spdk/stdinc.h"

#include <linux/virtio_scsi.h>

#include "spdk/env.h"
#include "spdk/thread.h"
#include "spdk/scsi.h"
#include "spdk/scsi_spec.h"
#include "spdk/conf.h"
#include "spdk/event.h"
#include "spdk/util.h"
#include "spdk/likely.h"

#include "spdk/vhost.h"
#include "vhost_internal.h"

/* Features supported by SPDK VHOST lib. */
#define SPDK_VHOST_SCSI_FEATURES	(SPDK_VHOST_FEATURES | \
					(1ULL << VIRTIO_SCSI_F_INOUT) | \
					(1ULL << VIRTIO_SCSI_F_HOTPLUG) | \
					(1ULL << VIRTIO_SCSI_F_CHANGE ) | \
					(1ULL << VIRTIO_SCSI_F_T10_PI ))

/* Features that are specified in VIRTIO SCSI but currently not supported:
 * - Live migration not supported yet
 * - T10 PI
 */
#define SPDK_VHOST_SCSI_DISABLED_FEATURES	(SPDK_VHOST_DISABLED_FEATURES | \
						(1ULL << VIRTIO_SCSI_F_T10_PI ))

/* Period (in microseconds) of the management poller that services the
 * control queue and reaps hot-detached targets. */
#define MGMT_POLL_PERIOD_US (1000 * 5)

/* Fixed virtio-scsi virtqueue indices; request queues start at index 2. */
#define VIRTIO_SCSI_CONTROLQ   0
#define VIRTIO_SCSI_EVENTQ   1
#define VIRTIO_SCSI_REQUESTQ   2

/* Per-target hot-detach bookkeeping: once `removed` is set, the mgmt poller
 * destructs the target when it has no pending tasks and then invokes
 * remove_cb(remove_ctx). */
struct spdk_scsi_dev_vhost_state {
	bool removed;
	spdk_vhost_event_fn remove_cb;
	void *remove_ctx;
};

/* A vhost-scsi controller: the generic vhost device plus up to
 * SPDK_VHOST_SCSI_CTRLR_MAX_DEVS SCSI targets and their detach state. */
struct spdk_vhost_scsi_dev {
	struct spdk_vhost_dev vdev;
	struct spdk_scsi_dev *scsi_dev[SPDK_VHOST_SCSI_CTRLR_MAX_DEVS];
	struct spdk_scsi_dev_vhost_state scsi_dev_state[SPDK_VHOST_SCSI_CTRLR_MAX_DEVS];

	/* Poller servicing the I/O request queues. */
	struct spdk_poller *requestq_poller;
	/* Poller servicing controlq/eventq and hot-detach reaping. */
	struct spdk_poller *mgmt_poller;
} __rte_cache_aligned;

/* Per-descriptor task context; one is preallocated for every virtqueue entry. */
struct spdk_vhost_scsi_task {
	struct spdk_scsi_task scsi;
	struct iovec iovs[SPDK_VHOST_IOVS_MAX];

	/* Response buffer mapped in guest memory; which member is valid
	 * depends on whether this is an I/O or a TMF (controlq) request. */
	union {
		struct virtio_scsi_cmd_resp *resp;
		struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	};

	struct spdk_vhost_scsi_dev *svdev;
	struct spdk_scsi_dev *scsi_dev;

	/** Number of bytes that were written. */
	uint32_t used_len;

	/* Index of this task's head descriptor in the virtqueue. */
	int req_idx;

	/* If set, the task is currently used for I/O processing.
	 */
	bool used;

	struct spdk_vhost_virtqueue *vq;
};

static int spdk_vhost_scsi_start(struct spdk_vhost_dev *, void *);
static int spdk_vhost_scsi_stop(struct spdk_vhost_dev *, void *);
static void spdk_vhost_scsi_dump_info_json(struct spdk_vhost_dev *vdev,
		struct spdk_json_write_ctx *w);
static void spdk_vhost_scsi_write_config_json(struct spdk_vhost_dev *vdev,
		struct spdk_json_write_ctx *w);
static int spdk_vhost_scsi_dev_remove(struct spdk_vhost_dev *vdev);

/* Backend ops table registered with the generic vhost layer. */
const struct spdk_vhost_dev_backend spdk_vhost_scsi_device_backend = {
	.virtio_features = SPDK_VHOST_SCSI_FEATURES,
	.disabled_features = SPDK_VHOST_SCSI_DISABLED_FEATURES,
	.start_device =  spdk_vhost_scsi_start,
	.stop_device = spdk_vhost_scsi_stop,
	.dump_info_json = spdk_vhost_scsi_dump_info_json,
	.write_config_json = spdk_vhost_scsi_write_config_json,
	.remove_device = spdk_vhost_scsi_dev_remove,
};

/* Drop the reference on the embedded SCSI task; the free callback below
 * runs when the last reference is released. */
static void
spdk_vhost_scsi_task_put(struct spdk_vhost_scsi_task *task)
{
	spdk_scsi_task_put(&task->scsi);
}

/* Final-release callback: decrement the device's in-flight task count and
 * mark the preallocated task slot reusable. */
static void
spdk_vhost_scsi_task_free_cb(struct spdk_scsi_task *scsi_task)
{
	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);

	assert(task->svdev->vdev.task_cnt > 0);
	task->svdev->vdev.task_cnt--;
	task->used = false;
}

/* Reap targets marked for hot-detach: once a removed target has no pending
 * tasks, free its I/O channels, destruct it and fire the user callback.
 * Called periodically from the management poller. */
static void
process_removed_devs(struct spdk_vhost_scsi_dev *svdev)
{
	struct spdk_scsi_dev *dev;
	struct spdk_scsi_dev_vhost_state *state;
	int i;

	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; ++i) {
		dev = svdev->scsi_dev[i];
		state = &svdev->scsi_dev_state[i];

		if (dev && state->removed && !spdk_scsi_dev_has_pending_tasks(dev)) {
			spdk_scsi_dev_free_io_channels(dev);
			svdev->scsi_dev[i] = NULL;
			spdk_scsi_dev_destruct(dev);
			if (state->remove_cb) {
				state->remove_cb(&svdev->vdev, state->remove_ctx);
				state->remove_cb = NULL;
			}
			SPDK_INFOLOG(SPDK_LOG_VHOST, "%s: hot-detached device 'Dev %u'.\n",
				     svdev->vdev.name, i);
		}
	}
}

/* Post a virtio-scsi event (e.g. transport reset on hotplug/hot-detach) for
 * the given target on the eventq. On any descriptor error the entry is still
 * returned to the guest, with req_size == 0. */
static void
eventq_enqueue(struct spdk_vhost_scsi_dev *svdev, unsigned scsi_dev_num, uint32_t event,
	       uint32_t reason)
{
	struct spdk_vhost_virtqueue *vq;
	struct vring_desc *desc, *desc_table;
	struct virtio_scsi_event *desc_ev;
	uint32_t desc_table_size, req_size = 0;
	uint16_t req;
	int rc;

	assert(scsi_dev_num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
	vq = &svdev->vdev.virtqueue[VIRTIO_SCSI_EVENTQ];

	if (spdk_vhost_vq_avail_ring_get(vq, &req, 1) != 1) {
		SPDK_ERRLOG("Controller %s: Failed to send virtio event (no avail ring entries?).\n",
			    svdev->vdev.name);
		return;
	}

	rc = spdk_vhost_vq_get_desc(&svdev->vdev, vq, req, &desc, &desc_table, &desc_table_size);
	if (rc != 0 || desc->len < sizeof(*desc_ev)) {
		SPDK_ERRLOG("Controller %s: Invalid eventq descriptor at index %"PRIu16".\n",
			    svdev->vdev.name, req);
		goto out;
	}

	desc_ev = spdk_vhost_gpa_to_vva(&svdev->vdev, desc->addr, sizeof(*desc_ev));
	if (desc_ev == NULL) {
		SPDK_ERRLOG("Controller %s: Eventq descriptor at index %"PRIu16" points to unmapped guest memory address %p.\n",
			    svdev->vdev.name, req, (void *)(uintptr_t)desc->addr);
		goto out;
	}

	desc_ev->event = event;
	desc_ev->lun[0] = 1;
	desc_ev->lun[1] = scsi_dev_num;
	/* virtio LUN id 0 can refer either to the entire device
	 * or actual LUN 0 (the only supported by vhost for now)
	 */
	desc_ev->lun[2] = 0 >> 8;	/* relevant only if lun[1] could be 0; both bytes encode LUN 0 */
	desc_ev->lun[3] = 0 & 0xFF;
	/* virtio doesn't specify any strict format for LUN id (bytes 2 and 3)
	 * current implementation relies on linux kernel sources
	 */
	memset(&desc_ev->lun[4], 0, 4);
	desc_ev->reason = reason;
	req_size = sizeof(*desc_ev);

out:
	spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, req, req_size);
}

/* Return the task's descriptor chain to the used ring and release the task. */
static void
submit_completion(struct spdk_vhost_scsi_task *task)
{
	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx,
					task->used_len);
	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "Finished task (%p) req_idx=%d\n", task, task->req_idx);

	spdk_vhost_scsi_task_put(task);
}

/* Completion callback for management (TMF) tasks. */
static void
spdk_vhost_scsi_task_mgmt_cpl(struct spdk_scsi_task *scsi_task)
{
	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);

	submit_completion(task);
}

/* Completion callback for I/O tasks: fill in the virtio response (status,
 * sense data, residual) and complete the request to the guest. */
static void
spdk_vhost_scsi_task_cpl(struct spdk_scsi_task *scsi_task)
{
	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);

	/* The SCSI task has completed.  Do final processing and then post
	   notification to the virtqueue's "used" ring.
	 */
	task->resp->status = task->scsi.status;

	if (task->scsi.status != SPDK_SCSI_STATUS_GOOD) {
		memcpy(task->resp->sense, task->scsi.sense_data, task->scsi.sense_data_len);
		task->resp->sense_len = task->scsi.sense_data_len;
	}
	assert(task->scsi.transfer_len == task->scsi.length);
	task->resp->resid = task->scsi.length - task->scsi.data_transferred;

	submit_completion(task);
}

/* Hand an I/O task to the SCSI layer; response defaults to OK and is
 * finalized in the completion callback. */
static void
task_submit(struct spdk_vhost_scsi_task *task)
{
	task->resp->response = VIRTIO_SCSI_S_OK;
	spdk_scsi_dev_queue_task(task->scsi_dev, &task->scsi);
}

/* Hand a task-management function (e.g. LUN reset) to the SCSI layer. */
static void
mgmt_task_submit(struct spdk_vhost_scsi_task *task, enum spdk_scsi_task_func func)
{
	task->tmf_resp->response = VIRTIO_SCSI_S_OK;
	spdk_scsi_dev_queue_mgmt_task(task->scsi_dev, &task->scsi, func);
}

/* Complete a malformed request back to the guest without submitting it.
 * used_len reflects however much of the response was set up before failure. */
static void
invalid_request(struct spdk_vhost_scsi_task *task)
{
	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx,
					task->used_len);
	spdk_vhost_scsi_task_put(task);

	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "Invalid request (status=%" PRIu8")\n",
		      task->resp ? task->resp->response : -1);
}

/* Resolve the virtio 8-byte LUN address into a SCSI target/LUN on this
 * controller.  Returns 0 on success (including the hot-detached case, so a
 * hotremove sense code can still be delivered) and -1 for a bad address. */
static int
spdk_vhost_scsi_task_init_target(struct spdk_vhost_scsi_task *task, const __u8 *lun)
{
	struct spdk_scsi_dev *dev;
	uint16_t lun_id = (((uint16_t)lun[2] << 8) | lun[3]) & 0x3FFF;

	SPDK_TRACEDUMP(SPDK_LOG_VHOST_SCSI_QUEUE, "LUN", lun, 8);

	/* First byte must be 1 and second is target */
	if (lun[0] != 1 || lun[1] >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
		return -1;
	}

	dev = task->svdev->scsi_dev[lun[1]];
	task->scsi_dev = dev;
	if (dev == NULL || task->svdev->scsi_dev_state[lun[1]].removed) {
		/* If dev has been hotdetached, return 0 to allow sending
		 * additional hotremove event via sense codes.
		 */
		return task->svdev->scsi_dev_state[lun[1]].removed ? 0 : -1;
	}

	task->scsi.target_port = spdk_scsi_dev_find_port_by_id(task->scsi_dev, 0);
	task->scsi.lun = spdk_scsi_dev_get_lun(dev, lun_id);
	return 0;
}

/* Parse and execute one controlq request (TMF or async-notification).
 * Everything except a LUN reset is answered synchronously here; on any
 * descriptor error the entry is completed with used_len == 0. */
static void
process_ctrl_request(struct spdk_vhost_scsi_task *task)
{
	struct spdk_vhost_dev *vdev = &task->svdev->vdev;
	struct vring_desc *desc, *desc_table;
	struct virtio_scsi_ctrl_tmf_req *ctrl_req;
	struct virtio_scsi_ctrl_an_resp *an_resp;
	uint32_t desc_table_size, used_len = 0;
	int rc;

	spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_mgmt_cpl, spdk_vhost_scsi_task_free_cb);
	rc = spdk_vhost_vq_get_desc(vdev, task->vq, task->req_idx, &desc, &desc_table, &desc_table_size);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("%s: Invalid controlq descriptor at index %d.\n",
			    vdev->name, task->req_idx);
		goto out;
	}

	ctrl_req = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*ctrl_req));
	if (ctrl_req == NULL) {
		SPDK_ERRLOG("%s: Invalid task management request at index %d.\n",
			    vdev->name, task->req_idx);
		goto out;
	}

	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE,
		      "Processing controlq descriptor: desc %d/%p, desc_addr %p, len %d, flags %d, last_used_idx %d; kickfd %d; size %d\n",
		      task->req_idx, desc, (void *)desc->addr, desc->len, desc->flags, task->vq->vring.last_used_idx,
		      task->vq->vring.kickfd, task->vq->vring.size);
	SPDK_TRACEDUMP(SPDK_LOG_VHOST_SCSI_QUEUE, "Request descriptor", (uint8_t *)ctrl_req,
		       desc->len);

	spdk_vhost_scsi_task_init_target(task, ctrl_req->lun);

	spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_size);
	if (spdk_unlikely(desc == NULL)) {
		SPDK_ERRLOG("%s: No response descriptor for controlq request %d.\n",
			    vdev->name, task->req_idx);
		goto out;
	}

	/* Process the TMF request */
	switch (ctrl_req->type) {
	case VIRTIO_SCSI_T_TMF:
		task->tmf_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*task->tmf_resp));
		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_ctrl_tmf_resp) || task->tmf_resp == NULL)) {
			SPDK_ERRLOG("%s: TMF response descriptor at index %d points to invalid guest memory region\n",
				    vdev->name, task->req_idx);
			goto out;
		}

		/* Check if we are processing a valid request */
		if (task->scsi_dev == NULL) {
			task->tmf_resp->response = VIRTIO_SCSI_S_BAD_TARGET;
			break;
		}

		switch (ctrl_req->subtype) {
		case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
			/* Handle LUN reset */
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE, "LUN reset\n");

			mgmt_task_submit(task, SPDK_SCSI_TASK_FUNC_LUN_RESET);
			/* Completed asynchronously via spdk_vhost_scsi_task_mgmt_cpl. */
			return;
		default:
			task->tmf_resp->response = VIRTIO_SCSI_S_ABORTED;
			/* Unsupported command */
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE, "Unsupported TMF command %x\n", ctrl_req->subtype);
			break;
		}
		break;
	case VIRTIO_SCSI_T_AN_QUERY:
	case VIRTIO_SCSI_T_AN_SUBSCRIBE: {
		an_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*an_resp));
		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_ctrl_an_resp) || an_resp == NULL)) {
			SPDK_WARNLOG("%s: Asynchronous response descriptor points to invalid guest memory region\n",
				     vdev->name);
			goto out;
		}

		an_resp->response = VIRTIO_SCSI_S_ABORTED;
		break;
	}
	default:
		SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE, "Unsupported control command %x\n", ctrl_req->type);
		break;
	}

	/* NOTE(review): used_len is set to the TMF response size even for the
	 * AN_QUERY/AN_SUBSCRIBE cases above — presumably acceptable because the
	 * structures are similarly sized; confirm against the virtio spec. */
	used_len = sizeof(struct virtio_scsi_ctrl_tmf_resp);
out:
	spdk_vhost_vq_used_ring_enqueue(vdev, task->vq, task->req_idx, used_len);
	spdk_vhost_scsi_task_put(task);
}

/*
 * Process task's descriptor chain and setup data related fields.
 * Return
 *   -1 if request is invalid and must be aborted,
 *    0 if all data are set.
 */
static int
task_data_setup(struct spdk_vhost_scsi_task *task,
		struct virtio_scsi_cmd_req **req)
{
	struct spdk_vhost_dev *vdev = &task->svdev->vdev;
	struct vring_desc *desc, *desc_table;
	struct iovec *iovs = task->iovs;
	uint16_t iovcnt = 0;
	uint32_t desc_table_len, len = 0;
	int rc;

	spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_cpl, spdk_vhost_scsi_task_free_cb);

	rc = spdk_vhost_vq_get_desc(vdev, task->vq, task->req_idx, &desc, &desc_table, &desc_table_len);
	/* First descriptor must be readable */
	if (spdk_unlikely(rc != 0 || spdk_vhost_vring_desc_is_wr(desc) ||
			  desc->len < sizeof(struct virtio_scsi_cmd_req))) {
		SPDK_WARNLOG("%s: invalid first (request) descriptor at index %"PRIu16".\n",
			     vdev->name, task->req_idx);
		goto invalid_task;
	}

	*req = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(**req));
	if (spdk_unlikely(*req == NULL)) {
		SPDK_WARNLOG("%s: Request descriptor at index %d points to invalid guest memory region\n",
			     vdev->name, task->req_idx);
		goto invalid_task;
	}

	/* Each request must have at least 2 descriptors (e.g.
	 * request and response) */
	spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
	if (desc == NULL) {
		SPDK_WARNLOG("%s: Descriptor chain at index %d contains neither payload nor response buffer.\n",
			     vdev->name, task->req_idx);
		goto invalid_task;
	}
	/* A device-writable descriptor right after the request header means the
	 * guest expects data from us (a READ-like command). */
	task->scsi.dxfer_dir = spdk_vhost_vring_desc_is_wr(desc) ? SPDK_SCSI_DIR_FROM_DEV :
			       SPDK_SCSI_DIR_TO_DEV;
	task->scsi.iovs = iovs;

	if (task->scsi.dxfer_dir == SPDK_SCSI_DIR_FROM_DEV) {
		/*
		 * FROM_DEV (READ): [RD_req][WR_resp][WR_buf0]...[WR_bufN]
		 */
		task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*task->resp));
		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_cmd_resp) || task->resp == NULL)) {
			SPDK_WARNLOG("%s: Response descriptor at index %d points to invalid guest memory region\n",
				     vdev->name, task->req_idx);
			goto invalid_task;
		}
		rc = spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
		if (spdk_unlikely(rc != 0)) {
			SPDK_WARNLOG("%s: invalid descriptor chain at request index %d (descriptor id overflow?).\n",
				     vdev->name, task->req_idx);
			goto invalid_task;
		}

		if (desc == NULL) {
			/*
			 * TEST UNIT READY command and some others might not contain any payload and this is not an error.
			 */
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_DATA,
				      "No payload descriptors for FROM DEV command req_idx=%"PRIu16".\n", task->req_idx);
			SPDK_TRACEDUMP(SPDK_LOG_VHOST_SCSI_DATA, "CDB=", (*req)->cdb, VIRTIO_SCSI_CDB_SIZE);
			task->used_len = sizeof(struct virtio_scsi_cmd_resp);
			task->scsi.iovcnt = 1;
			task->scsi.iovs[0].iov_len = 0;
			task->scsi.length = 0;
			task->scsi.transfer_len = 0;
			return 0;
		}

		/* All remaining descriptors are data. */
		while (desc) {
			if (spdk_unlikely(!spdk_vhost_vring_desc_is_wr(desc))) {
				SPDK_WARNLOG("FROM DEV cmd: descriptor nr %" PRIu16" in payload chain is read only.\n", iovcnt);
				goto invalid_task;
			}

			if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
				goto invalid_task;
			}
			len += desc->len;

			rc = spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
			if (spdk_unlikely(rc != 0)) {
				SPDK_WARNLOG("%s: invalid payload in descriptor chain starting at index %d.\n",
					     vdev->name, task->req_idx);
				goto invalid_task;
			}
		}

		/* Bytes written back to the guest = response header + payload. */
		task->used_len = sizeof(struct virtio_scsi_cmd_resp) + len;
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_DATA, "TO DEV");
		/*
		 * TO_DEV (WRITE):[RD_req][RD_buf0]...[RD_bufN][WR_resp]
		 * No need to check descriptor WR flag as this is done while setting scsi.dxfer_dir.
		 */

		/* Process descriptors up to response. */
		while (!spdk_vhost_vring_desc_is_wr(desc)) {
			if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
				goto invalid_task;
			}
			len += desc->len;

			spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
			if (spdk_unlikely(desc == NULL)) {
				SPDK_WARNLOG("TO_DEV cmd: no response descriptor.\n");
				goto invalid_task;
			}
		}

		task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*task->resp));
		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_cmd_resp) || task->resp == NULL)) {
			SPDK_WARNLOG("%s: Response descriptor at index %d points to invalid guest memory region\n",
				     vdev->name, task->req_idx);
			goto invalid_task;
		}

		/* Only the response header is written back for TO_DEV. */
		task->used_len = sizeof(struct virtio_scsi_cmd_resp);
	}

	task->scsi.iovcnt = iovcnt;
	task->scsi.length = len;
	task->scsi.transfer_len = len;
	return 0;

invalid_task:
	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_DATA, "%s: Invalid task at index %"PRIu16".\n",
		      vdev->name, task->req_idx);
	return -1;
}

/* Parse one I/O request: set up data buffers and resolve the target.
 * Returns 0 to submit, > 0 if already completed (e.g. NULL LUN handled with
 * sense data), < 0 if the request is invalid and must be aborted. */
static int
process_request(struct spdk_vhost_scsi_task *task)
{
	struct virtio_scsi_cmd_req *req;
	int result;

	result = task_data_setup(task, &req);
	if (result) {
		return result;
	}

	result = spdk_vhost_scsi_task_init_target(task, req->lun);
	if (spdk_unlikely(result != 0)) {
		task->resp->response = VIRTIO_SCSI_S_BAD_TARGET;
		return -1;
	}

	task->scsi.cdb = req->cdb;
	SPDK_TRACEDUMP(SPDK_LOG_VHOST_SCSI_DATA, "request CDB", req->cdb, VIRTIO_SCSI_CDB_SIZE);

	if (spdk_unlikely(task->scsi.lun == NULL)) {
		spdk_scsi_task_process_null_lun(&task->scsi);
		task->resp->response = VIRTIO_SCSI_S_OK;
		return 1;
	}

	return 0;
}

/* Drain the control queue: validate each avail-ring entry, claim its
 * preallocated task and run the TMF/AN handler. */
static void
process_controlq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue *vq)
{
	struct spdk_vhost_scsi_task *task;
	uint16_t reqs[32];
	uint16_t reqs_cnt, i;

	reqs_cnt = spdk_vhost_vq_avail_ring_get(vq, reqs, SPDK_COUNTOF(reqs));
	for (i = 0; i < reqs_cnt; i++) {
		if (spdk_unlikely(reqs[i] >= vq->vring.size)) {
			SPDK_ERRLOG("%s: invalid entry in avail ring. Buffer '%"PRIu16"' exceeds virtqueue size (%"PRIu16")\n",
				    svdev->vdev.name, reqs[i], vq->vring.size);
			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
			continue;
		}

		task = &((struct spdk_vhost_scsi_task *)vq->tasks)[reqs[i]];
		if (spdk_unlikely(task->used)) {
			SPDK_ERRLOG("%s: invalid entry in avail ring. Buffer '%"PRIu16"' is still in use!\n",
				    svdev->vdev.name, reqs[i]);
			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
			continue;
		}

		svdev->vdev.task_cnt++;
		memset(&task->scsi, 0, sizeof(task->scsi));
		task->tmf_resp = NULL;
		task->used = true;
		process_ctrl_request(task);
	}
}

/* Drain one request queue: validate each avail-ring entry, claim its
 * preallocated task, parse it and either submit it to the SCSI layer,
 * complete it immediately, or fail it back to the guest. */
static void
process_requestq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue *vq)
{
	struct spdk_vhost_scsi_task *task;
	uint16_t reqs[32];
	uint16_t reqs_cnt, i;
	int result;

	reqs_cnt = spdk_vhost_vq_avail_ring_get(vq, reqs, SPDK_COUNTOF(reqs));
	assert(reqs_cnt <= 32);

	for (i = 0; i < reqs_cnt; i++) {
		SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Starting processing request idx %"PRIu16"======\n",
			      reqs[i]);

		if (spdk_unlikely(reqs[i] >= vq->vring.size)) {
			SPDK_ERRLOG("%s: request idx '%"PRIu16"' exceeds virtqueue size (%"PRIu16").\n",
				    svdev->vdev.name, reqs[i], vq->vring.size);
			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
			continue;
		}

		task = &((struct spdk_vhost_scsi_task *)vq->tasks)[reqs[i]];
		if (spdk_unlikely(task->used)) {
			SPDK_ERRLOG("%s: request with idx '%"PRIu16"' is already pending.\n",
				    svdev->vdev.name, reqs[i]);
			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
			continue;
		}

		svdev->vdev.task_cnt++;
		memset(&task->scsi, 0, sizeof(task->scsi));
		task->resp = NULL;
		task->used = true;
		task->used_len = 0;
		result = process_request(task);
		if (likely(result == 0)) {
			task_submit(task);
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Task %p req_idx %d submitted ======\n", task,
				      task->req_idx);
		} else if (result > 0) {
			spdk_vhost_scsi_task_cpl(&task->scsi);
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Task %p req_idx %d finished early ======\n", task,
				      task->req_idx);
		} else {
			invalid_request(task);
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Task %p req_idx %d failed ======\n", task,
				      task->req_idx);
		}
	}
}

/* Management poller (runs every MGMT_POLL_PERIOD_US): reaps hot-detached
 * targets, services the control queue, and signals eventq/controlq used
 * rings.  Returns -1 per the spdk_poller convention (no work accounting). */
static int
vdev_mgmt_worker(void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;

	process_removed_devs(svdev);
	spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_EVENTQ]);

	process_controlq(svdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_CONTROLQ]);
	spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_CONTROLQ]);

	return -1;
}

/* I/O poller: services every request queue (indices >= VIRTIO_SCSI_REQUESTQ)
 * and then signals the guest for all used-ring updates. */
static int
vdev_worker(void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;
	uint32_t q_idx;

	for (q_idx = VIRTIO_SCSI_REQUESTQ; q_idx < svdev->vdev.max_queues; q_idx++) {
		process_requestq(svdev, &svdev->vdev.virtqueue[q_idx]);
	}

	spdk_vhost_dev_used_signal(&svdev->vdev);

	return -1;
}

/* Downcast a generic vhost device to a vhost-scsi device, or NULL if it is
 * not one (or is NULL itself). */
static struct spdk_vhost_scsi_dev *
to_scsi_dev(struct spdk_vhost_dev *ctrlr)
{
	if (ctrlr == NULL) {
		return NULL;
	}

	if (ctrlr->backend != &spdk_vhost_scsi_device_backend) {
		SPDK_ERRLOG("%s: not a vhost-scsi device.\n", ctrlr->name);
		return NULL;
	}

	return SPDK_CONTAINEROF(ctrlr, struct spdk_vhost_scsi_dev, vdev);
}

/* Allocate and register a new vhost-scsi controller.
 * Returns 0 on success or a negative errno; the allocation is freed on
 * registration failure. */
int
spdk_vhost_scsi_dev_construct(const char *name, const char *cpumask)
{
	struct spdk_vhost_scsi_dev *svdev = spdk_dma_zmalloc(sizeof(struct spdk_vhost_scsi_dev),
					    SPDK_CACHE_LINE_SIZE, NULL);
	int rc;

	if (svdev == NULL) {
		return -ENOMEM;
	}

	spdk_vhost_lock();
	rc = spdk_vhost_dev_register(&svdev->vdev, name, cpumask,
				     &spdk_vhost_scsi_device_backend);

	if (rc) {
		spdk_dma_free(svdev);
	}

	spdk_vhost_unlock();
	return rc;
}

/* Backend remove callback: refuse to remove a non-empty controller that is
 * still registered; otherwise force-remove remaining targets, unregister
 * the device, and free it. */
static int
spdk_vhost_scsi_dev_remove(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_scsi_dev *svdev = to_scsi_dev(vdev);
	int rc, i;

	if (svdev == NULL) {
		return -EINVAL;
	}

	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; ++i) {
		if (svdev->scsi_dev[i]) {
			if (vdev->registered) {
				SPDK_ERRLOG("Trying to remove non-empty controller: %s.\n", vdev->name);
				return -EBUSY;
			}

			rc = spdk_vhost_scsi_dev_remove_tgt(vdev, i, NULL, NULL);
			if (rc != 0) {
				SPDK_ERRLOG("%s: failed to force-remove target %d\n", vdev->name, i);
				return rc;
			}
		}
	}

	rc = spdk_vhost_dev_unregister(vdev);
	if (rc != 0) {
		return rc;
	}

	spdk_dma_free(svdev);
	return 0;
}

/* Look up target `num` on the given controller; NULL if the controller is
 * not a vhost-scsi device or the slot is empty. */
struct spdk_scsi_dev *
spdk_vhost_scsi_dev_get_tgt(struct spdk_vhost_dev *vdev, uint8_t num)
{
	struct spdk_vhost_scsi_dev *svdev;

	assert(num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
	svdev = to_scsi_dev(vdev);

	return svdev ? svdev->scsi_dev[num] : NULL;
}

/* SCSI-layer callback fired when a LUN's bdev disappears: find the owning
 * target on this controller and hot-detach the whole target. */
static void
spdk_vhost_scsi_lun_hotremove(const struct spdk_scsi_lun *lun, void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;
	const struct spdk_scsi_dev *scsi_dev;
	unsigned scsi_dev_num;

	assert(lun != NULL);
	assert(svdev != NULL);
	if (svdev->vdev.lcore != -1 &&
	    !spdk_vhost_dev_has_feature(&svdev->vdev, VIRTIO_SCSI_F_HOTPLUG)) {
		SPDK_WARNLOG("%s: hotremove is not enabled for this controller.\n", svdev->vdev.name);
		return;
	}

	scsi_dev = spdk_scsi_lun_get_dev(lun);
	for (scsi_dev_num = 0; scsi_dev_num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; scsi_dev_num++) {
		if (svdev->scsi_dev[scsi_dev_num] == scsi_dev) {
			break;
		}
	}

	if (scsi_dev_num == SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
		/* The entire device has been already removed. */
		return;
	}

	/* remove entire device */
	spdk_vhost_scsi_dev_remove_tgt(&svdev->vdev, scsi_dev_num, NULL, NULL);
}

/* Create SCSI target `scsi_tgt_num` (single LUN 0 backed by `bdev_name`) on
 * the controller.  If the controller is already running, also allocate I/O
 * channels and notify the guest via a hotplug event (when supported).
 * Returns 0 on success or a negative errno. */
int
spdk_vhost_scsi_dev_add_tgt(struct spdk_vhost_dev *vdev, unsigned scsi_tgt_num,
			    const char *bdev_name)
{
	struct spdk_vhost_scsi_dev *svdev;
	char target_name[SPDK_SCSI_DEV_MAX_NAME];
	int lun_id_list[1];
	const char *bdev_names_list[1];

	svdev = to_scsi_dev(vdev);
	if (svdev == NULL) {
		return -EINVAL;
	}

	if (scsi_tgt_num >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
		SPDK_ERRLOG("Controller %d target number too big (max %d)\n", scsi_tgt_num,
			    SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
		return -EINVAL;
	}

	if (bdev_name == NULL) {
		SPDK_ERRLOG("No lun name specified\n");
		return -EINVAL;
	}

	if (svdev->scsi_dev[scsi_tgt_num] != NULL) {
		SPDK_ERRLOG("Controller %s target %u already occupied\n", vdev->name, scsi_tgt_num);
		return -EEXIST;
	}

	/*
	 * At this stage only one LUN per target
	 */
	snprintf(target_name, sizeof(target_name), "Target %u", scsi_tgt_num);
	lun_id_list[0] = 0;
	bdev_names_list[0] = (char *)bdev_name;

	svdev->scsi_dev_state[scsi_tgt_num].removed = false;
	svdev->scsi_dev[scsi_tgt_num] = spdk_scsi_dev_construct(target_name, bdev_names_list, lun_id_list,
					1,
					SPDK_SPC_PROTOCOL_IDENTIFIER_SAS, spdk_vhost_scsi_lun_hotremove, svdev);

	if (svdev->scsi_dev[scsi_tgt_num] == NULL) {
		SPDK_ERRLOG("Couldn't create spdk SCSI target '%s' using bdev '%s' in controller: %s\n",
			    target_name, bdev_name, vdev->name);
		return -EINVAL;
	}
	spdk_scsi_dev_add_port(svdev->scsi_dev[scsi_tgt_num], 0, "vhost");

	SPDK_INFOLOG(SPDK_LOG_VHOST, "Controller %s: defined target '%s' using bdev '%s'\n",
		     vdev->name, target_name, bdev_name);

	if (vdev->lcore == -1) {
		/* All done. */
		return 0;
	}

	spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[scsi_tgt_num]);

	if (spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
		eventq_enqueue(svdev, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET,
			       VIRTIO_SCSI_EVT_RESET_RESCAN);
	} else {
		SPDK_NOTICELOG("Device %s does not support hotplug. "
			       "Please restart the driver or perform a rescan.\n",
			       vdev->name);
	}

	return 0;
}

/* Remove SCSI target `scsi_tgt_num` from the controller.  If the controller
 * is not running the target is destructed immediately; otherwise it is
 * queued for hot-detach (guest is notified, actual teardown happens in
 * process_removed_devs once pending tasks drain, then cb_fn fires).
 * Returns 0 on success or a negative errno. */
int
spdk_vhost_scsi_dev_remove_tgt(struct spdk_vhost_dev *vdev, unsigned scsi_tgt_num,
			       spdk_vhost_event_fn cb_fn, void *cb_arg)
{
	struct spdk_vhost_scsi_dev *svdev;
	struct spdk_scsi_dev *scsi_dev;
	struct spdk_scsi_dev_vhost_state *scsi_dev_state;
	int rc = 0;

	if (scsi_tgt_num >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
		SPDK_ERRLOG("%s: invalid target number %d\n", vdev->name, scsi_tgt_num);
		return -EINVAL;
	}

	svdev = to_scsi_dev(vdev);
	if (svdev == NULL) {
		return -ENODEV;
	}

	scsi_dev = svdev->scsi_dev[scsi_tgt_num];
	if (scsi_dev == NULL) {
		SPDK_ERRLOG("Controller %s target %u is not occupied\n", vdev->name, scsi_tgt_num);
		return -ENODEV;
	}

	if (svdev->vdev.lcore == -1) {
		/* controller is not in use, remove dev and exit */
		svdev->scsi_dev[scsi_tgt_num] = NULL;
		spdk_scsi_dev_destruct(scsi_dev);
		if (cb_fn) {
			rc = cb_fn(vdev, cb_arg);
		}
		SPDK_INFOLOG(SPDK_LOG_VHOST, "%s: removed target 'Target %u'\n",
			     vdev->name, scsi_tgt_num);
		return rc;
	}

	if (!spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
		SPDK_WARNLOG("%s: 'Target %u' is in use and hot-detach is not enabled for this controller.\n",
			     svdev->vdev.name, scsi_tgt_num);
		return -ENOTSUP;
	}

	scsi_dev_state = &svdev->scsi_dev_state[scsi_tgt_num];
	if (scsi_dev_state->removed) {
		SPDK_WARNLOG("%s: 'Target %u' has been already marked to hotremove.\n", svdev->vdev.name,
			     scsi_tgt_num);
		return -EBUSY;
	}

	scsi_dev_state->remove_cb = cb_fn;
	scsi_dev_state->remove_ctx = cb_arg;
	scsi_dev_state->removed = true;
	eventq_enqueue(svdev, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_REMOVED);

	SPDK_INFOLOG(SPDK_LOG_VHOST, "%s: queued 'Target %u' for hot-detach.\n", vdev->name, scsi_tgt_num);
	return 0;
}

/* Build all vhost-scsi controllers described by [VhostScsiN] sections of the
 * legacy config file; each "Target <num> <bdev>" line adds one target.
 * Returns 0 on success, -1 on any config error. */
int
spdk_vhost_scsi_controller_construct(void)
{
	struct spdk_conf_section *sp = spdk_conf_first_section(NULL);
	struct spdk_vhost_dev *vdev;
	int i, dev_num;
	unsigned ctrlr_num = 0;
	char *bdev_name, *tgt_num_str;
	char *cpumask;
	char *name;
	char *tgt = NULL;

	while (sp != NULL) {
		if (!spdk_conf_section_match_prefix(sp, "VhostScsi")) {
			sp = spdk_conf_next_section(sp);
			continue;
		}

		if (sscanf(spdk_conf_section_get_name(sp), "VhostScsi%u", &ctrlr_num) != 1) {
			SPDK_ERRLOG("Section '%s' has non-numeric suffix.\n",
				    spdk_conf_section_get_name(sp));
			return -1;
		}

		name = spdk_conf_section_get_val(sp, "Name");
		cpumask = spdk_conf_section_get_val(sp, "Cpumask");

		if (spdk_vhost_scsi_dev_construct(name, cpumask) < 0) {
			return -1;
		}

		vdev = spdk_vhost_dev_find(name);
		assert(vdev);

		for (i = 0; ; i++) {

			tgt = spdk_conf_section_get_nval(sp, "Target", i);
			if (tgt == NULL) {
				break;
			}

			tgt_num_str = spdk_conf_section_get_nmval(sp, "Target", i, 0);
			if (tgt_num_str == NULL) {
				SPDK_ERRLOG("%s: Invalid or missing target number\n", name);
				return -1;
			}

			dev_num = (int)strtol(tgt_num_str, NULL, 10);
			bdev_name = spdk_conf_section_get_nmval(sp, "Target", i, 1);
			if (bdev_name == NULL) {
				SPDK_ERRLOG("%s: Invalid or missing bdev name for target %d\n", name, dev_num);
				return -1;
			} else if (spdk_conf_section_get_nmval(sp, "Target", i, 2)) {
				SPDK_ERRLOG("%s: Only one LUN per vhost SCSI device supported\n", name);
				return -1;
			}

			if (spdk_vhost_scsi_dev_add_tgt(vdev, dev_num, bdev_name) < 0) {
				return -1;
			}
		}

		sp = spdk_conf_next_section(sp);
	}

	return 0;
}

/* Free the per-virtqueue task arrays allocated by alloc_task_pool(). */
static void
free_task_pool(struct spdk_vhost_scsi_dev *svdev)
{
	struct spdk_vhost_virtqueue *vq;
	uint16_t i;

	for (i = 0; i < svdev->vdev.max_queues; i++) {
		vq = &svdev->vdev.virtqueue[i];
		if (vq->tasks == NULL) {
			continue;
		}

		spdk_dma_free(vq->tasks);
		vq->tasks = NULL;
	}
}

/* Preallocate one task per virtqueue entry for every active queue.
 * On any failure all previously allocated pools are freed and -1 returned. */
static int
alloc_task_pool(struct spdk_vhost_scsi_dev *svdev)
{
	struct spdk_vhost_virtqueue *vq;
	struct spdk_vhost_scsi_task *task;
	uint32_t task_cnt;
	uint16_t i;
	uint32_t j;

	for (i = 0; i < svdev->vdev.max_queues; i++) {
		vq = &svdev->vdev.virtqueue[i];
		if (vq->vring.desc == NULL) {
			continue;
		}

		task_cnt = vq->vring.size;
		if (task_cnt > SPDK_VHOST_MAX_VQ_SIZE) {
			/* sanity check */
			/* NOTE(review): "virtuque" typo below is in a runtime log string — left as-is. */
			SPDK_ERRLOG("Controller %s: virtuque %"PRIu16" is too big. (size = %"PRIu32", max = %"PRIu32")\n",
				    svdev->vdev.name, i, task_cnt, SPDK_VHOST_MAX_VQ_SIZE);
			free_task_pool(svdev);
			return -1;
		}
		vq->tasks = spdk_dma_zmalloc(sizeof(struct spdk_vhost_scsi_task) * task_cnt,
					     SPDK_CACHE_LINE_SIZE, NULL);
		if (vq->tasks == NULL) {
			SPDK_ERRLOG("Controller %s: failed to allocate %"PRIu32" tasks for virtqueue %"PRIu16"\n",
				    svdev->vdev.name, task_cnt, i);
			free_task_pool(svdev);
			return -1;
		}

		for (j = 0; j < task_cnt; j++) {
			task = &((struct spdk_vhost_scsi_task *)vq->tasks)[j];
			task->svdev = svdev;
			task->vq = vq;
			task->req_idx = j;
		}
	}

	return 0;
}

/*
 * A new device is added to a data core. First the device is added to the main linked list
 * and then allocated to a specific data core.
1062 */ 1063 static int 1064 spdk_vhost_scsi_start(struct spdk_vhost_dev *vdev, void *event_ctx) 1065 { 1066 struct spdk_vhost_scsi_dev *svdev; 1067 uint32_t i; 1068 int rc; 1069 1070 svdev = to_scsi_dev(vdev); 1071 if (svdev == NULL) { 1072 SPDK_ERRLOG("Trying to start non-scsi controller as a scsi one.\n"); 1073 rc = -1; 1074 goto out; 1075 } 1076 1077 /* validate all I/O queues are in a contiguous index range */ 1078 for (i = VIRTIO_SCSI_REQUESTQ; i < vdev->max_queues; i++) { 1079 if (vdev->virtqueue[i].vring.desc == NULL) { 1080 SPDK_ERRLOG("%s: queue %"PRIu32" is empty\n", vdev->name, i); 1081 rc = -1; 1082 goto out; 1083 } 1084 } 1085 1086 rc = alloc_task_pool(svdev); 1087 if (rc != 0) { 1088 SPDK_ERRLOG("%s: failed to alloc task pool.\n", vdev->name); 1089 goto out; 1090 } 1091 1092 for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) { 1093 if (svdev->scsi_dev[i] == NULL) { 1094 continue; 1095 } 1096 spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[i]); 1097 } 1098 SPDK_INFOLOG(SPDK_LOG_VHOST, "Started poller for vhost controller %s on lcore %d\n", 1099 vdev->name, vdev->lcore); 1100 1101 svdev->requestq_poller = spdk_poller_register(vdev_worker, svdev, 0); 1102 if (vdev->virtqueue[VIRTIO_SCSI_CONTROLQ].vring.desc && 1103 vdev->virtqueue[VIRTIO_SCSI_EVENTQ].vring.desc) { 1104 svdev->mgmt_poller = spdk_poller_register(vdev_mgmt_worker, svdev, 1105 MGMT_POLL_PERIOD_US); 1106 } 1107 out: 1108 spdk_vhost_dev_backend_event_done(event_ctx, rc); 1109 return rc; 1110 } 1111 1112 struct spdk_vhost_dev_destroy_ctx { 1113 struct spdk_vhost_scsi_dev *svdev; 1114 struct spdk_poller *poller; 1115 void *event_ctx; 1116 }; 1117 1118 static int 1119 destroy_device_poller_cb(void *arg) 1120 { 1121 struct spdk_vhost_dev_destroy_ctx *ctx = arg; 1122 struct spdk_vhost_scsi_dev *svdev = ctx->svdev; 1123 uint32_t i; 1124 1125 if (svdev->vdev.task_cnt > 0) { 1126 return -1; 1127 } 1128 1129 1130 for (i = 0; i < svdev->vdev.max_queues; i++) { 1131 
spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[i]); 1132 } 1133 1134 for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) { 1135 if (svdev->scsi_dev[i] == NULL) { 1136 continue; 1137 } 1138 spdk_scsi_dev_free_io_channels(svdev->scsi_dev[i]); 1139 } 1140 1141 SPDK_INFOLOG(SPDK_LOG_VHOST, "Stopping poller for vhost controller %s\n", svdev->vdev.name); 1142 1143 free_task_pool(svdev); 1144 1145 spdk_poller_unregister(&ctx->poller); 1146 spdk_vhost_dev_backend_event_done(ctx->event_ctx, 0); 1147 spdk_dma_free(ctx); 1148 1149 return -1; 1150 } 1151 1152 static int 1153 spdk_vhost_scsi_stop(struct spdk_vhost_dev *vdev, void *event_ctx) 1154 { 1155 struct spdk_vhost_scsi_dev *svdev; 1156 struct spdk_vhost_dev_destroy_ctx *destroy_ctx; 1157 1158 svdev = to_scsi_dev(vdev); 1159 if (svdev == NULL) { 1160 SPDK_ERRLOG("Trying to stop non-scsi controller as a scsi one.\n"); 1161 goto err; 1162 } 1163 1164 destroy_ctx = spdk_dma_zmalloc(sizeof(*destroy_ctx), SPDK_CACHE_LINE_SIZE, NULL); 1165 if (destroy_ctx == NULL) { 1166 SPDK_ERRLOG("Failed to alloc memory for destroying device.\n"); 1167 goto err; 1168 } 1169 1170 destroy_ctx->svdev = svdev; 1171 destroy_ctx->event_ctx = event_ctx; 1172 1173 spdk_poller_unregister(&svdev->requestq_poller); 1174 spdk_poller_unregister(&svdev->mgmt_poller); 1175 destroy_ctx->poller = spdk_poller_register(destroy_device_poller_cb, destroy_ctx, 1176 1000); 1177 1178 return 0; 1179 1180 err: 1181 spdk_vhost_dev_backend_event_done(event_ctx, -1); 1182 return -1; 1183 } 1184 1185 static void 1186 spdk_vhost_scsi_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w) 1187 { 1188 struct spdk_scsi_dev *sdev; 1189 struct spdk_scsi_lun *lun; 1190 uint32_t dev_idx; 1191 uint32_t lun_idx; 1192 1193 assert(vdev != NULL); 1194 spdk_json_write_name(w, "scsi"); 1195 spdk_json_write_array_begin(w); 1196 for (dev_idx = 0; dev_idx < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; dev_idx++) { 1197 sdev = spdk_vhost_scsi_dev_get_tgt(vdev, 
dev_idx); 1198 if (!sdev) { 1199 continue; 1200 } 1201 1202 spdk_json_write_object_begin(w); 1203 1204 spdk_json_write_name(w, "scsi_dev_num"); 1205 spdk_json_write_uint32(w, dev_idx); 1206 1207 spdk_json_write_name(w, "id"); 1208 spdk_json_write_int32(w, spdk_scsi_dev_get_id(sdev)); 1209 1210 spdk_json_write_name(w, "target_name"); 1211 spdk_json_write_string(w, spdk_scsi_dev_get_name(sdev)); 1212 1213 spdk_json_write_name(w, "luns"); 1214 spdk_json_write_array_begin(w); 1215 1216 for (lun_idx = 0; lun_idx < SPDK_SCSI_DEV_MAX_LUN; lun_idx++) { 1217 lun = spdk_scsi_dev_get_lun(sdev, lun_idx); 1218 if (!lun) { 1219 continue; 1220 } 1221 1222 spdk_json_write_object_begin(w); 1223 1224 spdk_json_write_name(w, "id"); 1225 spdk_json_write_int32(w, spdk_scsi_lun_get_id(lun)); 1226 1227 spdk_json_write_name(w, "bdev_name"); 1228 spdk_json_write_string(w, spdk_scsi_lun_get_bdev_name(lun)); 1229 1230 spdk_json_write_object_end(w); 1231 } 1232 1233 spdk_json_write_array_end(w); 1234 spdk_json_write_object_end(w); 1235 } 1236 1237 spdk_json_write_array_end(w); 1238 } 1239 1240 static void 1241 spdk_vhost_scsi_write_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w) 1242 { 1243 struct spdk_vhost_scsi_dev *svdev; 1244 struct spdk_scsi_lun *lun; 1245 uint32_t i; 1246 1247 svdev = to_scsi_dev(vdev); 1248 if (!svdev) { 1249 return; 1250 } 1251 1252 spdk_json_write_object_begin(w); 1253 spdk_json_write_named_string(w, "method", "construct_vhost_scsi_controller"); 1254 1255 spdk_json_write_named_object_begin(w, "params"); 1256 spdk_json_write_named_string(w, "ctrlr", vdev->name); 1257 spdk_json_write_named_string(w, "cpumask", spdk_cpuset_fmt(vdev->cpumask)); 1258 spdk_json_write_object_end(w); 1259 1260 spdk_json_write_object_end(w); 1261 1262 for (i = 0; i < SPDK_COUNTOF(svdev->scsi_dev); i++) { 1263 if (svdev->scsi_dev[i] == NULL || svdev->scsi_dev_state[i].removed) { 1264 continue; 1265 } 1266 1267 lun = spdk_scsi_dev_get_lun(svdev->scsi_dev[i], 0); 1268 
1269 spdk_json_write_object_begin(w); 1270 spdk_json_write_named_string(w, "method", "add_vhost_scsi_lun"); 1271 1272 spdk_json_write_named_object_begin(w, "params"); 1273 spdk_json_write_named_string(w, "ctrlr", vdev->name); 1274 spdk_json_write_named_uint32(w, "scsi_target_num", i); 1275 1276 spdk_json_write_named_string(w, "bdev_name", spdk_scsi_lun_get_bdev_name(lun)); 1277 spdk_json_write_object_end(w); 1278 1279 spdk_json_write_object_end(w); 1280 } 1281 } 1282 1283 SPDK_LOG_REGISTER_COMPONENT("vhost_scsi", SPDK_LOG_VHOST_SCSI) 1284 SPDK_LOG_REGISTER_COMPONENT("vhost_scsi_queue", SPDK_LOG_VHOST_SCSI_QUEUE) 1285 SPDK_LOG_REGISTER_COMPONENT("vhost_scsi_data", SPDK_LOG_VHOST_SCSI_DATA) 1286