xref: /spdk/lib/vhost/vhost_scsi.c (revision f425b985138d03b5420aee3bd94510f7c2ca9c7f)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include <linux/virtio_scsi.h>
37 
38 #include "spdk/env.h"
39 #include "spdk/thread.h"
40 #include "spdk/scsi.h"
41 #include "spdk/scsi_spec.h"
42 #include "spdk/conf.h"
43 #include "spdk/event.h"
44 #include "spdk/util.h"
45 #include "spdk/likely.h"
46 
47 #include "spdk/vhost.h"
48 #include "vhost_internal.h"
49 
/* Features supported by SPDK VHOST lib. */
#define SPDK_VHOST_SCSI_FEATURES	(SPDK_VHOST_FEATURES | \
					(1ULL << VIRTIO_SCSI_F_INOUT) | \
					(1ULL << VIRTIO_SCSI_F_HOTPLUG) | \
					(1ULL << VIRTIO_SCSI_F_CHANGE ) | \
					(1ULL << VIRTIO_SCSI_F_T10_PI ))

/* Features that are specified in VIRTIO SCSI but currently not supported:
 * - Live migration not supported yet
 * - T10 PI
 */
/* NOTE(review): VIRTIO_SCSI_F_T10_PI is listed in both the supported and the
 * disabled mask; the disabled mask takes precedence, so T10 PI is effectively
 * off — confirm whether advertising it above is intentional. */
#define SPDK_VHOST_SCSI_DISABLED_FEATURES	(SPDK_VHOST_DISABLED_FEATURES | \
						(1ULL << VIRTIO_SCSI_F_T10_PI ))

/* Poll period of the management poller (controlq/eventq/hot-detach), in us. */
#define MGMT_POLL_PERIOD_US (1000 * 5)

/* Fixed virtqueue indices mandated by the virtio-scsi specification. */
#define VIRTIO_SCSI_CONTROLQ   0
#define VIRTIO_SCSI_EVENTQ   1
#define VIRTIO_SCSI_REQUESTQ   2
69 
/* Per-target hot-detach bookkeeping kept alongside each scsi_dev slot. */
struct spdk_scsi_dev_vhost_state {
	/* Target has been queued for hot-removal; finalized by the mgmt poller
	 * (process_removed_devs) once no tasks are pending. */
	bool removed;
	/* Optional user callback fired after the target is destructed. */
	spdk_vhost_event_fn remove_cb;
	void *remove_ctx;
};
75 
/* vhost-scsi controller: generic vhost device plus the attached SCSI targets. */
struct spdk_vhost_scsi_dev {
	/* Generic vhost device state; SPDK_CONTAINEROF() recovers this struct
	 * from a struct spdk_vhost_dev pointer. */
	struct spdk_vhost_dev vdev;
	/* SCSI targets indexed by virtio target number; NULL = empty slot. */
	struct spdk_scsi_dev *scsi_dev[SPDK_VHOST_SCSI_CTRLR_MAX_DEVS];
	/* Hot-detach state, parallel to scsi_dev[]. */
	struct spdk_scsi_dev_vhost_state scsi_dev_state[SPDK_VHOST_SCSI_CTRLR_MAX_DEVS];

	/* Polls all request queues (vdev_worker). */
	struct spdk_poller *requestq_poller;
	/* Polls controlq/eventq and finalizes hot-detached targets (vdev_mgmt_worker). */
	struct spdk_poller *mgmt_poller;
	struct spdk_vhost_dev_destroy_ctx destroy_ctx;
} __rte_cache_aligned;
85 
/* Per-request context; one task per virtqueue descriptor slot (vq->tasks). */
struct spdk_vhost_scsi_task {
	/* Embedded generic SCSI task; completion callbacks recover the
	 * enclosing struct with SPDK_CONTAINEROF(). */
	struct spdk_scsi_task	scsi;
	/* Scatter/gather list built from the request's descriptor chain. */
	struct iovec iovs[SPDK_VHOST_IOVS_MAX];

	/* Guest-memory response buffer. Which member is valid depends on the
	 * request type: resp for I/O requests, tmf_resp for controlq TMF. */
	union {
		struct virtio_scsi_cmd_resp *resp;
		struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	};

	/* Owning controller and resolved SCSI target (may be NULL). */
	struct spdk_vhost_scsi_dev *svdev;
	struct spdk_scsi_dev *scsi_dev;

	/** Number of bytes that were written. */
	uint32_t used_len;

	/* Index of the request's head descriptor in the virtqueue. */
	int req_idx;

	/* If set, the task is currently used for I/O processing. */
	bool used;

	/* Virtqueue this request arrived on (used for completion). */
	struct spdk_vhost_virtqueue *vq;
};
108 
109 static int spdk_vhost_scsi_start(struct spdk_vhost_dev *, void *);
110 static int spdk_vhost_scsi_stop(struct spdk_vhost_dev *, void *);
111 static void spdk_vhost_scsi_dump_info_json(struct spdk_vhost_dev *vdev,
112 		struct spdk_json_write_ctx *w);
113 static void spdk_vhost_scsi_write_config_json(struct spdk_vhost_dev *vdev,
114 		struct spdk_json_write_ctx *w);
115 static int spdk_vhost_scsi_dev_remove(struct spdk_vhost_dev *vdev);
116 
/* Backend callback table registering vhost-scsi with the generic vhost layer. */
const struct spdk_vhost_dev_backend spdk_vhost_scsi_device_backend = {
	.virtio_features = SPDK_VHOST_SCSI_FEATURES,
	.disabled_features = SPDK_VHOST_SCSI_DISABLED_FEATURES,
	.start_device =  spdk_vhost_scsi_start,
	.stop_device = spdk_vhost_scsi_stop,
	.dump_info_json = spdk_vhost_scsi_dump_info_json,
	.write_config_json = spdk_vhost_scsi_write_config_json,
	.remove_device = spdk_vhost_scsi_dev_remove,
};
126 
/* Drop the vhost reference on the embedded SCSI task; the task slot is
 * recycled via spdk_vhost_scsi_task_free_cb() once fully released. */
static void
spdk_vhost_scsi_task_put(struct spdk_vhost_scsi_task *task)
{
	spdk_scsi_task_put(&task->scsi);
}
132 
133 static void
134 spdk_vhost_scsi_task_free_cb(struct spdk_scsi_task *scsi_task)
135 {
136 	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);
137 
138 	assert(task->svdev->vdev.task_cnt > 0);
139 	task->svdev->vdev.task_cnt--;
140 	task->used = false;
141 }
142 
/*
 * Finalize targets queued for hot-removal: once a removed target has no
 * pending tasks left, free its I/O channels, destruct it and fire the
 * user's remove callback. Called from the management poller.
 */
static void
process_removed_devs(struct spdk_vhost_scsi_dev *svdev)
{
	struct spdk_scsi_dev *dev;
	struct spdk_scsi_dev_vhost_state *state;
	int i;

	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; ++i) {
		dev = svdev->scsi_dev[i];
		state = &svdev->scsi_dev_state[i];

		if (dev && state->removed && !spdk_scsi_dev_has_pending_tasks(dev)) {
			/* Clear the slot before destructing so no new I/O can
			 * be routed to this target. */
			spdk_scsi_dev_free_io_channels(dev);
			svdev->scsi_dev[i] = NULL;
			spdk_scsi_dev_destruct(dev);
			if (state->remove_cb) {
				state->remove_cb(&svdev->vdev, state->remove_ctx);
				state->remove_cb = NULL;
			}
			SPDK_INFOLOG(SPDK_LOG_VHOST, "%s: hot-detached device 'Dev %u'.\n",
				     svdev->vdev.name, i);
		}
	}
}
167 
/*
 * Post a virtio-scsi event (e.g. VIRTIO_SCSI_T_TRANSPORT_RESET) for the
 * given target on the eventq. Best-effort: if the guest supplied no usable
 * eventq descriptor the event is dropped with an error log.
 */
static void
eventq_enqueue(struct spdk_vhost_scsi_dev *svdev, unsigned scsi_dev_num, uint32_t event,
	       uint32_t reason)
{
	struct spdk_vhost_virtqueue *vq;
	struct vring_desc *desc, *desc_table;
	struct virtio_scsi_event *desc_ev;
	uint32_t desc_table_size, req_size = 0;
	uint16_t req;
	int rc;

	assert(scsi_dev_num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
	vq = &svdev->vdev.virtqueue[VIRTIO_SCSI_EVENTQ];

	if (spdk_vhost_vq_avail_ring_get(vq, &req, 1) != 1) {
		SPDK_ERRLOG("Controller %s: Failed to send virtio event (no avail ring entries?).\n",
			    svdev->vdev.name);
		return;
	}

	rc = spdk_vhost_vq_get_desc(&svdev->vdev, vq, req, &desc, &desc_table, &desc_table_size);
	if (rc != 0 || desc->len < sizeof(*desc_ev)) {
		SPDK_ERRLOG("Controller %s: Invalid eventq descriptor at index %"PRIu16".\n",
			    svdev->vdev.name, req);
		goto out;
	}

	desc_ev = spdk_vhost_gpa_to_vva(&svdev->vdev, desc->addr, sizeof(*desc_ev));
	if (desc_ev == NULL) {
		SPDK_ERRLOG("Controller %s: Eventq descriptor at index %"PRIu16" points to unmapped guest memory address %p.\n",
			    svdev->vdev.name, req, (void *)(uintptr_t)desc->addr);
		goto out;
	}

	desc_ev->event = event;
	/* Byte 0 = 1 and byte 1 = target number, mirroring the address format
	 * parsed in spdk_vhost_scsi_task_init_target(). */
	desc_ev->lun[0] = 1;
	desc_ev->lun[1] = scsi_dev_num;
	/* virtio LUN id 0 can refer either to the entire device
	 * or actual LUN 0 (the only supported by vhost for now)
	 */
	desc_ev->lun[2] = 0 >> 8;
	desc_ev->lun[3] = 0 & 0xFF;
	/* virtio doesn't specify any strict format for LUN id (bytes 2 and 3)
	 * current implementation relies on linux kernel sources
	 */
	memset(&desc_ev->lun[4], 0, 4);
	desc_ev->reason = reason;
	req_size = sizeof(*desc_ev);

out:
	/* On the error paths req_size is still 0, so the descriptor is
	 * returned to the guest with no data written. */
	spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, req, req_size);
}
220 
/*
 * Complete a task: put its head descriptor on the used ring (reporting
 * task->used_len bytes written to the guest), then release the task.
 */
static void
submit_completion(struct spdk_vhost_scsi_task *task)
{
	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx,
					task->used_len);
	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "Finished task (%p) req_idx=%d\n", task, task->req_idx);

	spdk_vhost_scsi_task_put(task);
}
230 
/* Completion callback for management (TMF) tasks queued via
 * spdk_scsi_dev_queue_mgmt_task(); simply completes the virtio request. */
static void
spdk_vhost_scsi_task_mgmt_cpl(struct spdk_scsi_task *scsi_task)
{
	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);

	submit_completion(task);
}
238 
/* Completion callback for I/O tasks: fill the guest-visible virtio-scsi
 * response (status, sense data, residual) and complete the request. */
static void
spdk_vhost_scsi_task_cpl(struct spdk_scsi_task *scsi_task)
{
	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);

	/* The SCSI task has completed.  Do final processing and then post
	   notification to the virtqueue's "used" ring.
	 */
	task->resp->status = task->scsi.status;

	if (task->scsi.status != SPDK_SCSI_STATUS_GOOD) {
		/* Propagate sense data to the guest on any non-GOOD status. */
		memcpy(task->resp->sense, task->scsi.sense_data, task->scsi.sense_data_len);
		task->resp->sense_len = task->scsi.sense_data_len;
		SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "Task (%p) req_idx=%d failed - status=%u\n", task, task->req_idx,
			      task->scsi.status);
	}
	assert(task->scsi.transfer_len == task->scsi.length);
	/* Residual = requested length minus bytes actually transferred. */
	task->resp->resid = task->scsi.length - task->scsi.data_transferred;

	submit_completion(task);
}
260 
/* Hand an I/O task to the SCSI layer. The response code is preset to OK;
 * the final status is filled in later by spdk_vhost_scsi_task_cpl(). */
static void
task_submit(struct spdk_vhost_scsi_task *task)
{
	task->resp->response = VIRTIO_SCSI_S_OK;
	spdk_scsi_dev_queue_task(task->scsi_dev, &task->scsi);
}
267 
/* Hand a task-management function (e.g. LUN reset) to the SCSI layer;
 * completed asynchronously via spdk_vhost_scsi_task_mgmt_cpl(). */
static void
mgmt_task_submit(struct spdk_vhost_scsi_task *task, enum spdk_scsi_task_func func)
{
	task->tmf_resp->response = VIRTIO_SCSI_S_OK;
	spdk_scsi_dev_queue_mgmt_task(task->scsi_dev, &task->scsi, func);
}
274 
275 static void
276 invalid_request(struct spdk_vhost_scsi_task *task)
277 {
278 	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx,
279 					task->used_len);
280 	spdk_vhost_scsi_task_put(task);
281 
282 	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "Invalid request (status=%" PRIu8")\n",
283 		      task->resp ? task->resp->response : -1);
284 }
285 
/*
 * Resolve the 8-byte virtio LUN address to a SCSI target/LUN on this
 * controller. The address follows the Linux virtio-scsi driver layout:
 * byte 0 must be 1, byte 1 is the target number, bytes 2-3 encode a
 * 14-bit LUN id.
 *
 * Returns -1 if the address is malformed or the target does not exist.
 * Returns 0 otherwise; for a target queued for hot-removal, task->scsi.lun
 * is left unset so the caller can report the removal via sense codes.
 */
static int
spdk_vhost_scsi_task_init_target(struct spdk_vhost_scsi_task *task, const __u8 *lun)
{
	struct spdk_scsi_dev *dev;
	uint16_t lun_id = (((uint16_t)lun[2] << 8) | lun[3]) & 0x3FFF;

	SPDK_TRACEDUMP(SPDK_LOG_VHOST_SCSI_QUEUE, "LUN", lun, 8);

	/* First byte must be 1 and second is target */
	if (lun[0] != 1 || lun[1] >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
		return -1;
	}

	dev = task->svdev->scsi_dev[lun[1]];
	task->scsi_dev = dev;
	if (dev == NULL || task->svdev->scsi_dev_state[lun[1]].removed) {
		/* If dev has been hotdetached, return 0 to allow sending
		 * additional hotremove event via sense codes.
		 */
		return task->svdev->scsi_dev_state[lun[1]].removed ? 0 : -1;
	}

	task->scsi.target_port = spdk_scsi_dev_find_port_by_id(task->scsi_dev, 0);
	task->scsi.lun = spdk_scsi_dev_get_lun(dev, lun_id);
	return 0;
}
312 
/*
 * Handle one controlq request: a task-management function (TMF) or an
 * asynchronous-notification (AN) query/subscribe.
 *
 * A LUN reset is forwarded to the SCSI layer and completed asynchronously
 * in spdk_vhost_scsi_task_mgmt_cpl(); every other path completes inline at
 * the bottom of this function.
 */
static void
process_ctrl_request(struct spdk_vhost_scsi_task *task)
{
	struct spdk_vhost_dev *vdev = &task->svdev->vdev;
	struct vring_desc *desc, *desc_table;
	struct virtio_scsi_ctrl_tmf_req *ctrl_req;
	struct virtio_scsi_ctrl_an_resp *an_resp;
	uint32_t desc_table_size, used_len = 0;
	int rc;

	spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_mgmt_cpl, spdk_vhost_scsi_task_free_cb);
	rc = spdk_vhost_vq_get_desc(vdev, task->vq, task->req_idx, &desc, &desc_table, &desc_table_size);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("%s: Invalid controlq descriptor at index %d.\n",
			    vdev->name, task->req_idx);
		goto out;
	}

	ctrl_req = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*ctrl_req));
	if (ctrl_req == NULL) {
		SPDK_ERRLOG("%s: Invalid task management request at index %d.\n",
			    vdev->name, task->req_idx);
		goto out;
	}

	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE,
		      "Processing controlq descriptor: desc %d/%p, desc_addr %p, len %d, flags %d, last_used_idx %d; kickfd %d; size %d\n",
		      task->req_idx, desc, (void *)desc->addr, desc->len, desc->flags, task->vq->vring.last_used_idx,
		      task->vq->vring.kickfd, task->vq->vring.size);
	SPDK_TRACEDUMP(SPDK_LOG_VHOST_SCSI_QUEUE, "Request descriptor", (uint8_t *)ctrl_req,
		       desc->len);

	/* On failure task->scsi_dev stays NULL; handled per request type below. */
	spdk_vhost_scsi_task_init_target(task, ctrl_req->lun);

	/* Advance to the (device-writable) response descriptor. */
	spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_size);
	if (spdk_unlikely(desc == NULL)) {
		SPDK_ERRLOG("%s: No response descriptor for controlq request %d.\n",
			    vdev->name, task->req_idx);
		goto out;
	}

	/* Process the TMF request */
	switch (ctrl_req->type) {
	case VIRTIO_SCSI_T_TMF:
		task->tmf_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*task->tmf_resp));
		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_ctrl_tmf_resp) || task->tmf_resp == NULL)) {
			SPDK_ERRLOG("%s: TMF response descriptor at index %d points to invalid guest memory region\n",
				    vdev->name, task->req_idx);
			goto out;
		}

		/* Check if we are processing a valid request */
		if (task->scsi_dev == NULL) {
			task->tmf_resp->response = VIRTIO_SCSI_S_BAD_TARGET;
			break;
		}

		switch (ctrl_req->subtype) {
		case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
			/* Handle LUN reset */
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE, "LUN reset\n");

			mgmt_task_submit(task, SPDK_SCSI_TASK_FUNC_LUN_RESET);
			/* Completed asynchronously; do not enqueue here. */
			return;
		default:
			task->tmf_resp->response = VIRTIO_SCSI_S_ABORTED;
			/* Unsupported command */
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE, "Unsupported TMF command %x\n", ctrl_req->subtype);
			break;
		}
		break;
	case VIRTIO_SCSI_T_AN_QUERY:
	case VIRTIO_SCSI_T_AN_SUBSCRIBE: {
		an_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*an_resp));
		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_ctrl_an_resp) || an_resp == NULL)) {
			SPDK_WARNLOG("%s: Asynchronous response descriptor points to invalid guest memory region\n",
				     vdev->name);
			goto out;
		}

		an_resp->response = VIRTIO_SCSI_S_ABORTED;
		break;
	}
	default:
		SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE, "Unsupported control command %x\n", ctrl_req->type);
		break;
	}

	/* NOTE(review): used_len is reported as sizeof(tmf_resp) even for AN
	 * responses, which are smaller — confirm guests tolerate this. */
	used_len = sizeof(struct virtio_scsi_ctrl_tmf_resp);
out:
	spdk_vhost_vq_used_ring_enqueue(vdev, task->vq, task->req_idx, used_len);
	spdk_vhost_scsi_task_put(task);
}
406 
/*
 * Process task's descriptor chain and setup data related fields.
 *
 * Expected chain layouts (per virtio-scsi):
 *   FROM_DEV (read):  [RD_req][WR_resp][WR_buf0]...[WR_bufN]
 *   TO_DEV  (write):  [RD_req][RD_buf0]...[RD_bufN][WR_resp]
 *
 * Return
 *   -1 if request is invalid and must be aborted,
 *    0 if all data are set.
 */
static int
task_data_setup(struct spdk_vhost_scsi_task *task,
		struct virtio_scsi_cmd_req **req)
{
	struct spdk_vhost_dev *vdev = &task->svdev->vdev;
	struct vring_desc *desc, *desc_table;
	struct iovec *iovs = task->iovs;
	uint16_t iovcnt = 0;
	uint32_t desc_table_len, len = 0;
	int rc;

	spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_cpl, spdk_vhost_scsi_task_free_cb);

	rc = spdk_vhost_vq_get_desc(vdev, task->vq, task->req_idx, &desc, &desc_table, &desc_table_len);
	/* First descriptor must be readable */
	if (spdk_unlikely(rc != 0  || spdk_vhost_vring_desc_is_wr(desc) ||
			  desc->len < sizeof(struct virtio_scsi_cmd_req))) {
		SPDK_WARNLOG("%s: invalid first (request) descriptor at index %"PRIu16".\n",
			     vdev->name, task->req_idx);
		goto invalid_task;
	}

	*req = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(**req));
	if (spdk_unlikely(*req == NULL)) {
		SPDK_WARNLOG("%s: Request descriptor at index %d points to invalid guest memory region\n",
			     vdev->name, task->req_idx);
		goto invalid_task;
	}

	/* Each request must have at least 2 descriptors (e.g. request and response) */
	spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
	if (desc == NULL) {
		SPDK_WARNLOG("%s: Descriptor chain at index %d contains neither payload nor response buffer.\n",
			     vdev->name, task->req_idx);
		goto invalid_task;
	}
	/* A device-writable second descriptor means the response comes first,
	 * i.e. this is a FROM_DEV (read) request. */
	task->scsi.dxfer_dir = spdk_vhost_vring_desc_is_wr(desc) ? SPDK_SCSI_DIR_FROM_DEV :
			       SPDK_SCSI_DIR_TO_DEV;
	task->scsi.iovs = iovs;

	if (task->scsi.dxfer_dir == SPDK_SCSI_DIR_FROM_DEV) {
		/*
		 * FROM_DEV (READ): [RD_req][WR_resp][WR_buf0]...[WR_bufN]
		 */
		task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*task->resp));
		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_cmd_resp) || task->resp == NULL)) {
			SPDK_WARNLOG("%s: Response descriptor at index %d points to invalid guest memory region\n",
				     vdev->name, task->req_idx);
			goto invalid_task;
		}
		rc = spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
		if (spdk_unlikely(rc != 0)) {
			SPDK_WARNLOG("%s: invalid descriptor chain at request index %d (descriptor id overflow?).\n",
				     vdev->name, task->req_idx);
			goto invalid_task;
		}

		if (desc == NULL) {
			/*
			 * TEST UNIT READY command and some others might not contain any payload and this is not an error.
			 */
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_DATA,
				      "No payload descriptors for FROM DEV command req_idx=%"PRIu16".\n", task->req_idx);
			SPDK_TRACEDUMP(SPDK_LOG_VHOST_SCSI_DATA, "CDB=", (*req)->cdb, VIRTIO_SCSI_CDB_SIZE);
			task->used_len = sizeof(struct virtio_scsi_cmd_resp);
			task->scsi.iovcnt = 1;
			task->scsi.iovs[0].iov_len = 0;
			task->scsi.length = 0;
			task->scsi.transfer_len = 0;
			return 0;
		}

		/* All remaining descriptors are data. */
		while (desc) {
			if (spdk_unlikely(!spdk_vhost_vring_desc_is_wr(desc))) {
				SPDK_WARNLOG("FROM DEV cmd: descriptor nr %" PRIu16" in payload chain is read only.\n", iovcnt);
				goto invalid_task;
			}

			if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
				goto invalid_task;
			}
			len += desc->len;

			rc = spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
			if (spdk_unlikely(rc != 0)) {
				SPDK_WARNLOG("%s: invalid payload in descriptor chain starting at index %d.\n",
					     vdev->name, task->req_idx);
				goto invalid_task;
			}
		}

		task->used_len = sizeof(struct virtio_scsi_cmd_resp) + len;
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_DATA, "TO DEV");
		/*
		 * TO_DEV (WRITE):[RD_req][RD_buf0]...[RD_bufN][WR_resp]
		 * No need to check descriptor WR flag as this is done while setting scsi.dxfer_dir.
		 */

		/* Process descriptors up to response. */
		while (!spdk_vhost_vring_desc_is_wr(desc)) {
			if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
				goto invalid_task;
			}
			len += desc->len;

			spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
			if (spdk_unlikely(desc == NULL)) {
				SPDK_WARNLOG("TO_DEV cmd: no response descriptor.\n");
				goto invalid_task;
			}
		}

		task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*task->resp));
		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_cmd_resp) || task->resp == NULL)) {
			SPDK_WARNLOG("%s: Response descriptor at index %d points to invalid guest memory region\n",
				     vdev->name, task->req_idx);
			goto invalid_task;
		}

		task->used_len = sizeof(struct virtio_scsi_cmd_resp);
	}

	task->scsi.iovcnt = iovcnt;
	task->scsi.length = len;
	task->scsi.transfer_len = len;
	return 0;

invalid_task:
	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_DATA, "%s: Invalid task at index %"PRIu16".\n",
		      vdev->name, task->req_idx);
	return -1;
}
547 
/*
 * Parse and set up a single requestq task.
 *
 * Returns:
 *   -1 — malformed request; caller must finish it via invalid_request(),
 *    0 — task is fully set up and ready for task_submit(),
 *    1 — task completed early (NULL LUN handled with sense data); caller
 *        finishes it via spdk_vhost_scsi_task_cpl().
 */
static int
process_request(struct spdk_vhost_scsi_task *task)
{
	struct virtio_scsi_cmd_req *req;
	int result;

	result = task_data_setup(task, &req);
	if (result) {
		return result;
	}

	result = spdk_vhost_scsi_task_init_target(task, req->lun);
	if (spdk_unlikely(result != 0)) {
		task->resp->response = VIRTIO_SCSI_S_BAD_TARGET;
		return -1;
	}

	task->scsi.cdb = req->cdb;
	SPDK_TRACEDUMP(SPDK_LOG_VHOST_SCSI_DATA, "request CDB", req->cdb, VIRTIO_SCSI_CDB_SIZE);

	if (spdk_unlikely(task->scsi.lun == NULL)) {
		/* Either the LUN doesn't exist or the target was hot-removed;
		 * answer with appropriate sense data instead of real I/O. */
		spdk_scsi_task_process_null_lun(&task->scsi);
		task->resp->response = VIRTIO_SCSI_S_OK;
		return 1;
	}

	return 0;
}
576 
/*
 * Drain up to 32 controlq entries and dispatch each to
 * process_ctrl_request(). Out-of-range or still-in-use entries are
 * completed immediately with zero bytes written.
 */
static void
process_controlq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue *vq)
{
	struct spdk_vhost_scsi_task *task;
	uint16_t reqs[32];
	uint16_t reqs_cnt, i;

	reqs_cnt = spdk_vhost_vq_avail_ring_get(vq, reqs, SPDK_COUNTOF(reqs));
	for (i = 0; i < reqs_cnt; i++) {
		if (spdk_unlikely(reqs[i] >= vq->vring.size)) {
			SPDK_ERRLOG("%s: invalid entry in avail ring. Buffer '%"PRIu16"' exceeds virtqueue size (%"PRIu16")\n",
				    svdev->vdev.name, reqs[i], vq->vring.size);
			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
			continue;
		}

		task = &((struct spdk_vhost_scsi_task *)vq->tasks)[reqs[i]];
		if (spdk_unlikely(task->used)) {
			SPDK_ERRLOG("%s: invalid entry in avail ring. Buffer '%"PRIu16"' is still in use!\n",
				    svdev->vdev.name, reqs[i]);
			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
			continue;
		}

		/* Claim the task slot; released via spdk_vhost_scsi_task_free_cb(). */
		svdev->vdev.task_cnt++;
		memset(&task->scsi, 0, sizeof(task->scsi));
		task->tmf_resp = NULL;
		task->used = true;
		process_ctrl_request(task);
	}
}
608 
/*
 * Drain up to 32 requestq entries, set up each as an I/O task and either
 * submit it to the SCSI layer, complete it early (NULL LUN), or fail it.
 */
static void
process_requestq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue *vq)
{
	struct spdk_vhost_scsi_task *task;
	uint16_t reqs[32];
	uint16_t reqs_cnt, i;
	int result;

	reqs_cnt = spdk_vhost_vq_avail_ring_get(vq, reqs, SPDK_COUNTOF(reqs));
	assert(reqs_cnt <= 32);

	for (i = 0; i < reqs_cnt; i++) {
		SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Starting processing request idx %"PRIu16"======\n",
			      reqs[i]);

		if (spdk_unlikely(reqs[i] >= vq->vring.size)) {
			SPDK_ERRLOG("%s: request idx '%"PRIu16"' exceeds virtqueue size (%"PRIu16").\n",
				    svdev->vdev.name, reqs[i], vq->vring.size);
			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
			continue;
		}

		task = &((struct spdk_vhost_scsi_task *)vq->tasks)[reqs[i]];
		if (spdk_unlikely(task->used)) {
			SPDK_ERRLOG("%s: request with idx '%"PRIu16"' is already pending.\n",
				    svdev->vdev.name, reqs[i]);
			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
			continue;
		}

		/* Claim the task slot; released via spdk_vhost_scsi_task_free_cb(). */
		svdev->vdev.task_cnt++;
		memset(&task->scsi, 0, sizeof(task->scsi));
		task->resp = NULL;
		task->used = true;
		task->used_len = 0;
		result = process_request(task);
		if (likely(result == 0)) {
			task_submit(task);
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Task %p req_idx %d submitted ======\n", task,
				      task->req_idx);
		} else if (result > 0) {
			spdk_vhost_scsi_task_cpl(&task->scsi);
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Task %p req_idx %d finished early ======\n", task,
				      task->req_idx);
		} else {
			invalid_request(task);
			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Task %p req_idx %d failed ======\n", task,
				      task->req_idx);
		}
	}
}
660 
661 static int
662 vdev_mgmt_worker(void *arg)
663 {
664 	struct spdk_vhost_scsi_dev *svdev = arg;
665 
666 	process_removed_devs(svdev);
667 	spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_EVENTQ]);
668 
669 	process_controlq(svdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_CONTROLQ]);
670 	spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_CONTROLQ]);
671 
672 	return -1;
673 }
674 
675 static int
676 vdev_worker(void *arg)
677 {
678 	struct spdk_vhost_scsi_dev *svdev = arg;
679 	uint32_t q_idx;
680 
681 	for (q_idx = VIRTIO_SCSI_REQUESTQ; q_idx < svdev->vdev.max_queues; q_idx++) {
682 		process_requestq(svdev, &svdev->vdev.virtqueue[q_idx]);
683 	}
684 
685 	spdk_vhost_dev_used_signal(&svdev->vdev);
686 
687 	return -1;
688 }
689 
690 static struct spdk_vhost_scsi_dev *
691 to_scsi_dev(struct spdk_vhost_dev *ctrlr)
692 {
693 	if (ctrlr == NULL) {
694 		return NULL;
695 	}
696 
697 	if (ctrlr->backend != &spdk_vhost_scsi_device_backend) {
698 		SPDK_ERRLOG("%s: not a vhost-scsi device.\n", ctrlr->name);
699 		return NULL;
700 	}
701 
702 	return SPDK_CONTAINEROF(ctrlr, struct spdk_vhost_scsi_dev, vdev);
703 }
704 
705 int
706 spdk_vhost_scsi_dev_construct(const char *name, const char *cpumask)
707 {
708 	struct spdk_vhost_scsi_dev *svdev = spdk_dma_zmalloc(sizeof(struct spdk_vhost_scsi_dev),
709 					    SPDK_CACHE_LINE_SIZE, NULL);
710 	int rc;
711 
712 	if (svdev == NULL) {
713 		return -ENOMEM;
714 	}
715 
716 	spdk_vhost_lock();
717 	rc = spdk_vhost_dev_register(&svdev->vdev, name, cpumask,
718 				     &spdk_vhost_scsi_device_backend);
719 
720 	if (rc) {
721 		spdk_dma_free(svdev);
722 	}
723 
724 	spdk_vhost_unlock();
725 	return rc;
726 }
727 
/*
 * Backend remove_device callback. While the controller is still registered,
 * a non-empty controller cannot be removed (-EBUSY); during final cleanup
 * (already unregistered) remaining targets are force-removed first.
 */
static int
spdk_vhost_scsi_dev_remove(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_scsi_dev *svdev = to_scsi_dev(vdev);
	int rc, i;

	if (svdev == NULL) {
		return -EINVAL;
	}

	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; ++i) {
		if (svdev->scsi_dev[i]) {
			/* Refuse to remove a live controller that still has targets. */
			if (vdev->registered) {
				SPDK_ERRLOG("Trying to remove non-empty controller: %s.\n", vdev->name);
				return -EBUSY;
			}

			rc = spdk_vhost_scsi_dev_remove_tgt(vdev, i, NULL, NULL);
			if (rc != 0) {
				SPDK_ERRLOG("%s: failed to force-remove target %d\n", vdev->name, i);
				return rc;
			}
		}
	}

	rc = spdk_vhost_dev_unregister(vdev);
	if (rc != 0) {
		return rc;
	}

	spdk_dma_free(svdev);
	return 0;
}
761 
762 struct spdk_scsi_dev *
763 spdk_vhost_scsi_dev_get_tgt(struct spdk_vhost_dev *vdev, uint8_t num)
764 {
765 	struct spdk_vhost_scsi_dev *svdev;
766 
767 	assert(num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
768 	svdev = to_scsi_dev(vdev);
769 
770 	return svdev ? svdev->scsi_dev[num] : NULL;
771 }
772 
/*
 * Hotremove callback installed on every LUN at target construction
 * (see spdk_vhost_scsi_dev_add_tgt). Locates the target owning the LUN
 * and hot-detaches the whole target — one LUN per target is supported.
 */
static void
spdk_vhost_scsi_lun_hotremove(const struct spdk_scsi_lun *lun, void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;
	const struct spdk_scsi_dev *scsi_dev;
	unsigned scsi_dev_num;

	assert(lun != NULL);
	assert(svdev != NULL);
	/* lcore != -1 means the controller is running; without the hotplug
	 * feature negotiated, a running guest cannot be notified. */
	if (svdev->vdev.lcore != -1 &&
	    !spdk_vhost_dev_has_feature(&svdev->vdev, VIRTIO_SCSI_F_HOTPLUG)) {
		SPDK_WARNLOG("%s: hotremove is not enabled for this controller.\n", svdev->vdev.name);
		return;
	}

	scsi_dev = spdk_scsi_lun_get_dev(lun);
	for (scsi_dev_num = 0; scsi_dev_num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; scsi_dev_num++) {
		if (svdev->scsi_dev[scsi_dev_num] == scsi_dev) {
			break;
		}
	}

	if (scsi_dev_num == SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
		/* The entire device has been already removed. */
		return;
	}

	/* remove entire device */
	spdk_vhost_scsi_dev_remove_tgt(&svdev->vdev, scsi_dev_num, NULL, NULL);
}
803 
/*
 * Create a SCSI target with a single LUN (LUN 0) backed by bdev_name and
 * attach it at slot scsi_tgt_num. If the controller is already running,
 * I/O channels are allocated and — when the guest negotiated
 * VIRTIO_SCSI_F_HOTPLUG — a rescan event is sent on the eventq.
 *
 * Returns 0 on success, -EINVAL on bad arguments or construction failure,
 * -EEXIST if the slot is already occupied.
 */
int
spdk_vhost_scsi_dev_add_tgt(struct spdk_vhost_dev *vdev, unsigned scsi_tgt_num,
			    const char *bdev_name)
{
	struct spdk_vhost_scsi_dev *svdev;
	char target_name[SPDK_SCSI_DEV_MAX_NAME];
	int lun_id_list[1];
	const char *bdev_names_list[1];

	svdev = to_scsi_dev(vdev);
	if (svdev == NULL) {
		return -EINVAL;
	}

	if (scsi_tgt_num >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
		SPDK_ERRLOG("Controller %d target number too big (max %d)\n", scsi_tgt_num,
			    SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
		return -EINVAL;
	}

	if (bdev_name == NULL) {
		SPDK_ERRLOG("No lun name specified\n");
		return -EINVAL;
	}

	if (svdev->scsi_dev[scsi_tgt_num] != NULL) {
		SPDK_ERRLOG("Controller %s target %u already occupied\n", vdev->name, scsi_tgt_num);
		return -EEXIST;
	}

	/*
	 * At this stage only one LUN per target
	 */
	snprintf(target_name, sizeof(target_name), "Target %u", scsi_tgt_num);
	lun_id_list[0] = 0;
	bdev_names_list[0] = (char *)bdev_name;

	/* Reset the removed flag in case this slot was hot-detached earlier. */
	svdev->scsi_dev_state[scsi_tgt_num].removed = false;
	svdev->scsi_dev[scsi_tgt_num] = spdk_scsi_dev_construct(target_name, bdev_names_list, lun_id_list,
					1,
					SPDK_SPC_PROTOCOL_IDENTIFIER_SAS, spdk_vhost_scsi_lun_hotremove, svdev);

	if (svdev->scsi_dev[scsi_tgt_num] == NULL) {
		SPDK_ERRLOG("Couldn't create spdk SCSI target '%s' using bdev '%s' in controller: %s\n",
			    target_name, bdev_name, vdev->name);
		return -EINVAL;
	}
	spdk_scsi_dev_add_port(svdev->scsi_dev[scsi_tgt_num], 0, "vhost");

	SPDK_INFOLOG(SPDK_LOG_VHOST, "Controller %s: defined target '%s' using bdev '%s'\n",
		     vdev->name, target_name, bdev_name);

	if (vdev->lcore == -1) {
		/* Controller not running yet — channels are allocated on start. */
		/* All done. */
		return 0;
	}

	spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[scsi_tgt_num]);

	if (spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
		eventq_enqueue(svdev, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET,
			       VIRTIO_SCSI_EVT_RESET_RESCAN);
	} else {
		SPDK_NOTICELOG("Device %s does not support hotplug. "
			       "Please restart the driver or perform a rescan.\n",
			       vdev->name);
	}

	return 0;
}
874 
875 int
876 spdk_vhost_scsi_dev_remove_tgt(struct spdk_vhost_dev *vdev, unsigned scsi_tgt_num,
877 			       spdk_vhost_event_fn cb_fn, void *cb_arg)
878 {
879 	struct spdk_vhost_scsi_dev *svdev;
880 	struct spdk_scsi_dev *scsi_dev;
881 	struct spdk_scsi_dev_vhost_state *scsi_dev_state;
882 	int rc = 0;
883 
884 	if (scsi_tgt_num >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
885 		SPDK_ERRLOG("%s: invalid target number %d\n", vdev->name, scsi_tgt_num);
886 		return -EINVAL;
887 	}
888 
889 	svdev = to_scsi_dev(vdev);
890 	if (svdev == NULL) {
891 		return -ENODEV;
892 	}
893 
894 	scsi_dev = svdev->scsi_dev[scsi_tgt_num];
895 	if (scsi_dev == NULL) {
896 		SPDK_ERRLOG("Controller %s target %u is not occupied\n", vdev->name, scsi_tgt_num);
897 		return -ENODEV;
898 	}
899 
900 	if (svdev->vdev.lcore == -1) {
901 		/* controller is not in use, remove dev and exit */
902 		svdev->scsi_dev[scsi_tgt_num] = NULL;
903 		spdk_scsi_dev_destruct(scsi_dev);
904 		if (cb_fn) {
905 			rc = cb_fn(vdev, cb_arg);
906 		}
907 		SPDK_INFOLOG(SPDK_LOG_VHOST, "%s: removed target 'Target %u'\n",
908 			     vdev->name, scsi_tgt_num);
909 		return rc;
910 	}
911 
912 	if (!spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
913 		SPDK_WARNLOG("%s: 'Target %u' is in use and hot-detach is not enabled for this controller.\n",
914 			     svdev->vdev.name, scsi_tgt_num);
915 		return -ENOTSUP;
916 	}
917 
918 	scsi_dev_state = &svdev->scsi_dev_state[scsi_tgt_num];
919 	if (scsi_dev_state->removed) {
920 		SPDK_WARNLOG("%s: 'Target %u' has been already marked to hotremove.\n", svdev->vdev.name,
921 			     scsi_tgt_num);
922 		return -EBUSY;
923 	}
924 
925 	scsi_dev_state->remove_cb = cb_fn;
926 	scsi_dev_state->remove_ctx = cb_arg;
927 	scsi_dev_state->removed = true;
928 	eventq_enqueue(svdev, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_REMOVED);
929 
930 	SPDK_INFOLOG(SPDK_LOG_VHOST, "%s: queued 'Target %u' for hot-detach.\n", vdev->name, scsi_tgt_num);
931 	return 0;
932 }
933 
934 int
935 spdk_vhost_scsi_controller_construct(void)
936 {
937 	struct spdk_conf_section *sp = spdk_conf_first_section(NULL);
938 	struct spdk_vhost_dev *vdev;
939 	int i, dev_num;
940 	unsigned ctrlr_num = 0;
941 	char *bdev_name, *tgt_num_str;
942 	char *cpumask;
943 	char *name;
944 	char *tgt = NULL;
945 
946 	while (sp != NULL) {
947 		if (!spdk_conf_section_match_prefix(sp, "VhostScsi")) {
948 			sp = spdk_conf_next_section(sp);
949 			continue;
950 		}
951 
952 		if (sscanf(spdk_conf_section_get_name(sp), "VhostScsi%u", &ctrlr_num) != 1) {
953 			SPDK_ERRLOG("Section '%s' has non-numeric suffix.\n",
954 				    spdk_conf_section_get_name(sp));
955 			return -1;
956 		}
957 
958 		name =  spdk_conf_section_get_val(sp, "Name");
959 		cpumask = spdk_conf_section_get_val(sp, "Cpumask");
960 
961 		if (spdk_vhost_scsi_dev_construct(name, cpumask) < 0) {
962 			return -1;
963 		}
964 
965 		vdev = spdk_vhost_dev_find(name);
966 		assert(vdev);
967 
968 		for (i = 0; ; i++) {
969 
970 			tgt = spdk_conf_section_get_nval(sp, "Target", i);
971 			if (tgt == NULL) {
972 				break;
973 			}
974 
975 			tgt_num_str = spdk_conf_section_get_nmval(sp, "Target", i, 0);
976 			if (tgt_num_str == NULL) {
977 				SPDK_ERRLOG("%s: Invalid or missing target number\n", name);
978 				return -1;
979 			}
980 
981 			dev_num = (int)strtol(tgt_num_str, NULL, 10);
982 			bdev_name = spdk_conf_section_get_nmval(sp, "Target", i, 1);
983 			if (bdev_name == NULL) {
984 				SPDK_ERRLOG("%s: Invalid or missing bdev name for target %d\n", name, dev_num);
985 				return -1;
986 			} else if (spdk_conf_section_get_nmval(sp, "Target", i, 2)) {
987 				SPDK_ERRLOG("%s: Only one LUN per vhost SCSI device supported\n", name);
988 				return -1;
989 			}
990 
991 			if (spdk_vhost_scsi_dev_add_tgt(vdev, dev_num, bdev_name) < 0) {
992 				return -1;
993 			}
994 		}
995 
996 		sp = spdk_conf_next_section(sp);
997 	}
998 
999 	return 0;
1000 }
1001 
1002 static void
1003 free_task_pool(struct spdk_vhost_scsi_dev *svdev)
1004 {
1005 	struct spdk_vhost_virtqueue *vq;
1006 	uint16_t i;
1007 
1008 	for (i = 0; i < svdev->vdev.max_queues; i++) {
1009 		vq = &svdev->vdev.virtqueue[i];
1010 		if (vq->tasks == NULL) {
1011 			continue;
1012 		}
1013 
1014 		spdk_dma_free(vq->tasks);
1015 		vq->tasks = NULL;
1016 	}
1017 }
1018 
1019 static int
1020 alloc_task_pool(struct spdk_vhost_scsi_dev *svdev)
1021 {
1022 	struct spdk_vhost_virtqueue *vq;
1023 	struct spdk_vhost_scsi_task *task;
1024 	uint32_t task_cnt;
1025 	uint16_t i;
1026 	uint32_t j;
1027 
1028 	for (i = 0; i < svdev->vdev.max_queues; i++) {
1029 		vq = &svdev->vdev.virtqueue[i];
1030 		if (vq->vring.desc == NULL) {
1031 			continue;
1032 		}
1033 
1034 		task_cnt = vq->vring.size;
1035 		if (task_cnt > SPDK_VHOST_MAX_VQ_SIZE) {
1036 			/* sanity check */
1037 			SPDK_ERRLOG("Controller %s: virtuque %"PRIu16" is too big. (size = %"PRIu32", max = %"PRIu32")\n",
1038 				    svdev->vdev.name, i, task_cnt, SPDK_VHOST_MAX_VQ_SIZE);
1039 			free_task_pool(svdev);
1040 			return -1;
1041 		}
1042 		vq->tasks = spdk_dma_zmalloc(sizeof(struct spdk_vhost_scsi_task) * task_cnt,
1043 					     SPDK_CACHE_LINE_SIZE, NULL);
1044 		if (vq->tasks == NULL) {
1045 			SPDK_ERRLOG("Controller %s: failed to allocate %"PRIu32" tasks for virtqueue %"PRIu16"\n",
1046 				    svdev->vdev.name, task_cnt, i);
1047 			free_task_pool(svdev);
1048 			return -1;
1049 		}
1050 
1051 		for (j = 0; j < task_cnt; j++) {
1052 			task = &((struct spdk_vhost_scsi_task *)vq->tasks)[j];
1053 			task->svdev = svdev;
1054 			task->vq = vq;
1055 			task->req_idx = j;
1056 		}
1057 	}
1058 
1059 	return 0;
1060 }
1061 
1062 /*
1063  * A new device is added to a data core. First the device is added to the main linked list
1064  * and then allocated to a specific data core.
1065  */
1066 static int
1067 spdk_vhost_scsi_start(struct spdk_vhost_dev *vdev, void *event_ctx)
1068 {
1069 	struct spdk_vhost_scsi_dev *svdev;
1070 	uint32_t i;
1071 	int rc;
1072 
1073 	svdev = to_scsi_dev(vdev);
1074 	if (svdev == NULL) {
1075 		SPDK_ERRLOG("Trying to start non-scsi controller as a scsi one.\n");
1076 		rc = -1;
1077 		goto out;
1078 	}
1079 
1080 	/* validate all I/O queues are in a contiguous index range */
1081 	for (i = VIRTIO_SCSI_REQUESTQ; i < vdev->max_queues; i++) {
1082 		if (vdev->virtqueue[i].vring.desc == NULL) {
1083 			SPDK_ERRLOG("%s: queue %"PRIu32" is empty\n", vdev->name, i);
1084 			rc = -1;
1085 			goto out;
1086 		}
1087 	}
1088 
1089 	rc = alloc_task_pool(svdev);
1090 	if (rc != 0) {
1091 		SPDK_ERRLOG("%s: failed to alloc task pool.\n", vdev->name);
1092 		goto out;
1093 	}
1094 
1095 	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) {
1096 		if (svdev->scsi_dev[i] == NULL) {
1097 			continue;
1098 		}
1099 		spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[i]);
1100 	}
1101 	SPDK_INFOLOG(SPDK_LOG_VHOST, "Started poller for vhost controller %s on lcore %d\n",
1102 		     vdev->name, vdev->lcore);
1103 
1104 	svdev->requestq_poller = spdk_poller_register(vdev_worker, svdev, 0);
1105 	if (vdev->virtqueue[VIRTIO_SCSI_CONTROLQ].vring.desc &&
1106 	    vdev->virtqueue[VIRTIO_SCSI_EVENTQ].vring.desc) {
1107 		svdev->mgmt_poller = spdk_poller_register(vdev_mgmt_worker, svdev,
1108 				     MGMT_POLL_PERIOD_US);
1109 	}
1110 out:
1111 	spdk_vhost_dev_backend_event_done(event_ctx, rc);
1112 	return rc;
1113 }
1114 
1115 static int
1116 destroy_device_poller_cb(void *arg)
1117 {
1118 	struct spdk_vhost_scsi_dev *svdev = arg;
1119 	uint32_t i;
1120 
1121 	if (svdev->vdev.task_cnt > 0) {
1122 		return -1;
1123 	}
1124 
1125 
1126 	for (i = 0; i < svdev->vdev.max_queues; i++) {
1127 		spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[i]);
1128 	}
1129 
1130 	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) {
1131 		if (svdev->scsi_dev[i] == NULL) {
1132 			continue;
1133 		}
1134 		spdk_scsi_dev_free_io_channels(svdev->scsi_dev[i]);
1135 	}
1136 
1137 	SPDK_INFOLOG(SPDK_LOG_VHOST, "Stopping poller for vhost controller %s\n", svdev->vdev.name);
1138 
1139 	free_task_pool(svdev);
1140 
1141 	spdk_poller_unregister(&svdev->destroy_ctx.poller);
1142 	spdk_vhost_dev_backend_event_done(svdev->destroy_ctx.event_ctx, 0);
1143 
1144 	return -1;
1145 }
1146 
1147 static int
1148 spdk_vhost_scsi_stop(struct spdk_vhost_dev *vdev, void *event_ctx)
1149 {
1150 	struct spdk_vhost_scsi_dev *svdev;
1151 
1152 	svdev = to_scsi_dev(vdev);
1153 	if (svdev == NULL) {
1154 		SPDK_ERRLOG("Trying to stop non-scsi controller as a scsi one.\n");
1155 		goto err;
1156 	}
1157 
1158 	svdev->destroy_ctx.event_ctx = event_ctx;
1159 	spdk_poller_unregister(&svdev->requestq_poller);
1160 	spdk_poller_unregister(&svdev->mgmt_poller);
1161 	svdev->destroy_ctx.poller = spdk_poller_register(destroy_device_poller_cb, svdev,
1162 				    1000);
1163 
1164 	return 0;
1165 
1166 err:
1167 	spdk_vhost_dev_backend_event_done(event_ctx, -1);
1168 	return -1;
1169 }
1170 
1171 static void
1172 spdk_vhost_scsi_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
1173 {
1174 	struct spdk_scsi_dev *sdev;
1175 	struct spdk_scsi_lun *lun;
1176 	uint32_t dev_idx;
1177 	uint32_t lun_idx;
1178 
1179 	assert(vdev != NULL);
1180 	spdk_json_write_name(w, "scsi");
1181 	spdk_json_write_array_begin(w);
1182 	for (dev_idx = 0; dev_idx < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; dev_idx++) {
1183 		sdev = spdk_vhost_scsi_dev_get_tgt(vdev, dev_idx);
1184 		if (!sdev) {
1185 			continue;
1186 		}
1187 
1188 		spdk_json_write_object_begin(w);
1189 
1190 		spdk_json_write_name(w, "scsi_dev_num");
1191 		spdk_json_write_uint32(w, dev_idx);
1192 
1193 		spdk_json_write_name(w, "id");
1194 		spdk_json_write_int32(w, spdk_scsi_dev_get_id(sdev));
1195 
1196 		spdk_json_write_name(w, "target_name");
1197 		spdk_json_write_string(w, spdk_scsi_dev_get_name(sdev));
1198 
1199 		spdk_json_write_name(w, "luns");
1200 		spdk_json_write_array_begin(w);
1201 
1202 		for (lun_idx = 0; lun_idx < SPDK_SCSI_DEV_MAX_LUN; lun_idx++) {
1203 			lun = spdk_scsi_dev_get_lun(sdev, lun_idx);
1204 			if (!lun) {
1205 				continue;
1206 			}
1207 
1208 			spdk_json_write_object_begin(w);
1209 
1210 			spdk_json_write_name(w, "id");
1211 			spdk_json_write_int32(w, spdk_scsi_lun_get_id(lun));
1212 
1213 			spdk_json_write_name(w, "bdev_name");
1214 			spdk_json_write_string(w, spdk_scsi_lun_get_bdev_name(lun));
1215 
1216 			spdk_json_write_object_end(w);
1217 		}
1218 
1219 		spdk_json_write_array_end(w);
1220 		spdk_json_write_object_end(w);
1221 	}
1222 
1223 	spdk_json_write_array_end(w);
1224 }
1225 
1226 static void
1227 spdk_vhost_scsi_write_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
1228 {
1229 	struct spdk_vhost_scsi_dev *svdev;
1230 	struct spdk_scsi_lun *lun;
1231 	uint32_t i;
1232 
1233 	svdev = to_scsi_dev(vdev);
1234 	if (!svdev) {
1235 		return;
1236 	}
1237 
1238 	spdk_json_write_object_begin(w);
1239 	spdk_json_write_named_string(w, "method", "construct_vhost_scsi_controller");
1240 
1241 	spdk_json_write_named_object_begin(w, "params");
1242 	spdk_json_write_named_string(w, "ctrlr", vdev->name);
1243 	spdk_json_write_named_string(w, "cpumask", spdk_cpuset_fmt(vdev->cpumask));
1244 	spdk_json_write_object_end(w);
1245 
1246 	spdk_json_write_object_end(w);
1247 
1248 	for (i = 0; i < SPDK_COUNTOF(svdev->scsi_dev); i++) {
1249 		if (svdev->scsi_dev[i] == NULL || svdev->scsi_dev_state[i].removed) {
1250 			continue;
1251 		}
1252 
1253 		lun = spdk_scsi_dev_get_lun(svdev->scsi_dev[i], 0);
1254 
1255 		spdk_json_write_object_begin(w);
1256 		spdk_json_write_named_string(w, "method", "add_vhost_scsi_lun");
1257 
1258 		spdk_json_write_named_object_begin(w, "params");
1259 		spdk_json_write_named_string(w, "ctrlr", vdev->name);
1260 		spdk_json_write_named_uint32(w, "scsi_target_num", i);
1261 
1262 		spdk_json_write_named_string(w, "bdev_name", spdk_scsi_lun_get_bdev_name(lun));
1263 		spdk_json_write_object_end(w);
1264 
1265 		spdk_json_write_object_end(w);
1266 	}
1267 }
1268 
/* Register the trace-log flags used by this module so they can be enabled
 * at runtime (e.g. via the -L command-line option or the RPC interface). */
SPDK_LOG_REGISTER_COMPONENT("vhost_scsi", SPDK_LOG_VHOST_SCSI)
SPDK_LOG_REGISTER_COMPONENT("vhost_scsi_queue", SPDK_LOG_VHOST_SCSI_QUEUE)
SPDK_LOG_REGISTER_COMPONENT("vhost_scsi_data", SPDK_LOG_VHOST_SCSI_DATA)
1272