xref: /spdk/lib/vhost/vhost_scsi.c (revision b119facb65247c714030aa19f3f0528bcd28a834)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include <linux/virtio_scsi.h>
37 
38 #include "spdk/env.h"
39 #include "spdk/io_channel.h"
40 #include "spdk/scsi.h"
41 #include "spdk/scsi_spec.h"
42 #include "spdk/conf.h"
43 #include "spdk/event.h"
44 #include "spdk/util.h"
45 #include "spdk/likely.h"
46 
47 #include "spdk/vhost.h"
48 #include "vhost_internal.h"
49 
50 /* Features supported by SPDK VHOST lib. */
51 #define SPDK_VHOST_SCSI_FEATURES	(SPDK_VHOST_FEATURES | \
52 					(1ULL << VIRTIO_SCSI_F_INOUT) | \
53 					(1ULL << VIRTIO_SCSI_F_HOTPLUG) | \
54 					(1ULL << VIRTIO_SCSI_F_CHANGE) | \
55 					(1ULL << VIRTIO_SCSI_F_T10_PI))
56 
57 /* Features that are specified in VIRTIO SCSI but are currently not supported:
58  * - Live migration
59  * - T10 PI
60  */
61 #define SPDK_VHOST_SCSI_DISABLED_FEATURES	(SPDK_VHOST_DISABLED_FEATURES | \
62 						(1ULL << VIRTIO_SCSI_F_T10_PI))
63 
64 #define MGMT_POLL_PERIOD_US (1000 * 5)
65 
66 #define VIRTIO_SCSI_CONTROLQ   0
67 #define VIRTIO_SCSI_EVENTQ   1
68 #define VIRTIO_SCSI_REQUESTQ   2
69 
70 struct spdk_scsi_dev_vhost_state {
71 	bool removed;
72 	spdk_vhost_event_fn remove_cb;
73 	void *remove_ctx;
74 };
75 
76 struct spdk_vhost_scsi_dev {
77 	struct spdk_vhost_dev vdev;
78 	struct spdk_scsi_dev *scsi_dev[SPDK_VHOST_SCSI_CTRLR_MAX_DEVS];
79 	struct spdk_scsi_dev_vhost_state scsi_dev_state[SPDK_VHOST_SCSI_CTRLR_MAX_DEVS];
80 
81 	struct spdk_poller *requestq_poller;
82 	struct spdk_poller *mgmt_poller;
83 } __rte_cache_aligned;
84 
85 struct spdk_vhost_scsi_task {
86 	struct spdk_scsi_task	scsi;
87 	struct iovec iovs[SPDK_VHOST_IOVS_MAX];
88 
89 	union {
90 		struct virtio_scsi_cmd_resp *resp;
91 		struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
92 	};
93 
94 	struct spdk_vhost_scsi_dev *svdev;
95 	struct spdk_scsi_dev *scsi_dev;
96 
97 	/** Number of bytes that were written. */
98 	uint32_t used_len;
99 
100 	int req_idx;
101 
102 	/* If set, the task is currently used for I/O processing. */
103 	bool used;
104 
105 	struct spdk_vhost_virtqueue *vq;
106 };
107 
108 static int spdk_vhost_scsi_start(struct spdk_vhost_dev *, void *);
109 static int spdk_vhost_scsi_stop(struct spdk_vhost_dev *, void *);
110 static void spdk_vhost_scsi_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
111 static int spdk_vhost_scsi_dev_remove(struct spdk_vhost_dev *vdev);
112 
113 const struct spdk_vhost_dev_backend spdk_vhost_scsi_device_backend = {
114 	.virtio_features = SPDK_VHOST_SCSI_FEATURES,
115 	.disabled_features = SPDK_VHOST_SCSI_DISABLED_FEATURES,
116 	.start_device = spdk_vhost_scsi_start,
117 	.stop_device = spdk_vhost_scsi_stop,
118 	.dump_config_json = spdk_vhost_scsi_config_json,
119 	.remove_device = spdk_vhost_scsi_dev_remove,
120 };
121 
122 static void
123 spdk_vhost_scsi_task_put(struct spdk_vhost_scsi_task *task)
124 {
125 	spdk_scsi_task_put(&task->scsi);
126 }
127 
128 static void
129 spdk_vhost_scsi_task_free_cb(struct spdk_scsi_task *scsi_task)
130 {
131 	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);
132 
133 	assert(task->svdev->vdev.task_cnt > 0);
134 	task->svdev->vdev.task_cnt--;
135 	task->used = false;
136 }
137 
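/*
 * Called from the management poller: once a hot-removed target has no pending
 * tasks left, free its I/O channels, destruct the SCSI device and invoke the
 * remove callback that was registered at hot-detach time.
 */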
138 static void
139 process_removed_devs(struct spdk_vhost_scsi_dev *svdev)
140 {
141 	struct spdk_scsi_dev *dev;
142 	struct spdk_scsi_dev_vhost_state *state;
143 	int i;
144 
145 	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; ++i) {
146 		dev = svdev->scsi_dev[i];
147 		state = &svdev->scsi_dev_state[i];
148 
149 		if (dev && state->removed && !spdk_scsi_dev_has_pending_tasks(dev)) {
150 			spdk_scsi_dev_free_io_channels(dev);
151 			svdev->scsi_dev[i] = NULL;
152 			spdk_scsi_dev_destruct(dev);
153 			if (state->remove_cb) {
154 				state->remove_cb(&svdev->vdev, state->remove_ctx);
155 				state->remove_cb = NULL;
156 			}
157 			SPDK_NOTICELOG("%s: hot-detached device 'Dev %u'.\n", svdev->vdev.name, i);
158 		}
159 	}
160 }
161 
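/*
 * Push a single virtio-scsi event (e.g. a transport reset) for the given
 * target onto the event queue. If the guest provided no usable eventq
 * descriptor, the event is dropped with an error log.
 */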
162 static void
163 eventq_enqueue(struct spdk_vhost_scsi_dev *svdev, unsigned scsi_dev_num, uint32_t event,
164 	       uint32_t reason)
165 {
166 	struct spdk_vhost_virtqueue *vq;
167 	struct vring_desc *desc, *desc_table;
168 	struct virtio_scsi_event *desc_ev;
169 	uint32_t desc_table_size, req_size = 0;
170 	uint16_t req;
171 	int rc;
172 
173 	assert(scsi_dev_num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
174 	vq = &svdev->vdev.virtqueue[VIRTIO_SCSI_EVENTQ];
175 
176 	if (spdk_vhost_vq_avail_ring_get(vq, &req, 1) != 1) {
177 		SPDK_ERRLOG("Controller %s: Failed to send virtio event (no avail ring entries?).\n",
178 			    svdev->vdev.name);
179 		return;
180 	}
181 
182 	rc = spdk_vhost_vq_get_desc(&svdev->vdev, vq, req, &desc, &desc_table, &desc_table_size);
183 	if (rc != 0 || desc->len < sizeof(*desc_ev)) {
184 		SPDK_ERRLOG("Controller %s: Invalid eventq descriptor at index %"PRIu16".\n",
185 			    svdev->vdev.name, req);
186 		goto out;
187 	}
188 
189 	desc_ev = spdk_vhost_gpa_to_vva(&svdev->vdev, desc->addr);
190 	if (desc_ev == NULL) {
191 		SPDK_ERRLOG("Controller %s: Eventq descriptor at index %"PRIu16" points to unmapped guest memory address %p.\n",
192 			    svdev->vdev.name, req, (void *)(uintptr_t)desc->addr);
193 		goto out;
194 	}
195 
196 	desc_ev->event = event;
197 	desc_ev->lun[0] = 1;
198 	desc_ev->lun[1] = scsi_dev_num;
199 	/* virtio LUN id 0 can refer either to the entire device
200 	 * or to the actual LUN 0 (the only one vhost supports for now)
201 	 */
202 	desc_ev->lun[2] = 0 >> 8;
203 	desc_ev->lun[3] = 0 & 0xFF;
204 	/* virtio doesn't specify any strict format for the LUN id (bytes 2 and 3);
205 	 * the current implementation follows the Linux kernel sources
206 	 */
207 	memset(&desc_ev->lun[4], 0, 4);
208 	desc_ev->reason = reason;
209 	req_size = sizeof(*desc_ev);
210 
211 out:
212 	spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, req, req_size);
213 }
214 
215 static void
216 submit_completion(struct spdk_vhost_scsi_task *task)
217 {
218 	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx,
219 					task->used_len);
220 	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "Finished task (%p) req_idx=%d\n", task, task->req_idx);
221 
222 	spdk_vhost_scsi_task_put(task);
223 }
224 
225 static void
226 spdk_vhost_scsi_task_mgmt_cpl(struct spdk_scsi_task *scsi_task)
227 {
228 	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);
229 
230 	submit_completion(task);
231 }
232 
233 static void
234 spdk_vhost_scsi_task_cpl(struct spdk_scsi_task *scsi_task)
235 {
236 	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);
237 
238 	/* The SCSI task has completed.  Do final processing and then post
239 	 * notification to the virtqueue's "used" ring.
240 	 */
241 	task->resp->status = task->scsi.status;
242 
243 	if (task->scsi.status != SPDK_SCSI_STATUS_GOOD) {
244 		memcpy(task->resp->sense, task->scsi.sense_data, task->scsi.sense_data_len);
245 		task->resp->sense_len = task->scsi.sense_data_len;
246 	}
247 	assert(task->scsi.transfer_len == task->scsi.length);
248 	task->resp->resid = task->scsi.length - task->scsi.data_transferred;
249 
250 	submit_completion(task);
251 }
252 
253 static void
254 task_submit(struct spdk_vhost_scsi_task *task)
255 {
256 	task->resp->response = VIRTIO_SCSI_S_OK;
257 	spdk_scsi_dev_queue_task(task->scsi_dev, &task->scsi);
258 }
259 
260 static void
261 mgmt_task_submit(struct spdk_vhost_scsi_task *task, enum spdk_scsi_task_func func)
262 {
263 	task->tmf_resp->response = VIRTIO_SCSI_S_OK;
264 	spdk_scsi_dev_queue_mgmt_task(task->scsi_dev, &task->scsi, func);
265 }
266 
267 static void
268 invalid_request(struct spdk_vhost_scsi_task *task)
269 {
270 	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx,
271 					task->used_len);
272 	spdk_vhost_scsi_task_put(task);
273 
274 	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "Invalid request (status=%" PRIu8")\n",
275 		      task->resp ? task->resp->response : -1);
276 }
277 
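/*
 * Translate the 8-byte virtio LUN address into a SCSI target and LUN.
 * Returns -1 for malformed addresses and unknown targets. For targets that
 * are being hot-removed it returns 0 with a NULL LUN, so the removal can
 * still be reported to the initiator through sense data.
 */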
278 static int
279 spdk_vhost_scsi_task_init_target(struct spdk_vhost_scsi_task *task, const __u8 *lun)
280 {
281 	struct spdk_scsi_dev *dev;
282 	uint16_t lun_id = (((uint16_t)lun[2] << 8) | lun[3]) & 0x3FFF;
283 
284 	SPDK_TRACEDUMP(SPDK_LOG_VHOST_SCSI_QUEUE, "LUN", lun, 8);
285 
286 	/* First byte must be 1 and the second is the target ID */
287 	if (lun[0] != 1 || lun[1] >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
288 		return -1;
289 	}
290 
291 	dev = task->svdev->scsi_dev[lun[1]];
292 	task->scsi_dev = dev;
293 	if (dev == NULL || task->svdev->scsi_dev_state[lun[1]].removed) {
294 		/* If the device has been hot-detached, return 0 to allow sending
295 		 * an additional hot-remove event via sense codes.
296 		 */
297 		return task->svdev->scsi_dev_state[lun[1]].removed ? 0 : -1;
298 	}
299 
300 	task->scsi.target_port = spdk_scsi_dev_find_port_by_id(task->scsi_dev, 0);
301 	task->scsi.lun = spdk_scsi_dev_get_lun(dev, lun_id);
302 	return 0;
303 }
304 
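/*
 * Handle a single control queue request. LUN resets are forwarded to the
 * SCSI layer as management tasks; all other TMF subtypes and the
 * asynchronous notification requests are completed immediately with an
 * appropriate virtio response code.
 */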
305 static void
306 process_ctrl_request(struct spdk_vhost_scsi_task *task)
307 {
308 	struct spdk_vhost_dev *vdev = &task->svdev->vdev;
309 	struct vring_desc *desc, *desc_table;
310 	struct virtio_scsi_ctrl_tmf_req *ctrl_req;
311 	struct virtio_scsi_ctrl_an_resp *an_resp;
312 	uint32_t desc_table_size, used_len = 0;
313 	int rc;
314 
315 	spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_mgmt_cpl, spdk_vhost_scsi_task_free_cb);
316 	rc = spdk_vhost_vq_get_desc(vdev, task->vq, task->req_idx, &desc, &desc_table, &desc_table_size);
317 	if (spdk_unlikely(rc != 0)) {
318 		SPDK_ERRLOG("%s: Invalid controlq descriptor at index %d.\n",
319 			    vdev->name, task->req_idx);
320 		goto out;
321 	}
322 
323 	ctrl_req = spdk_vhost_gpa_to_vva(vdev, desc->addr);
	if (spdk_unlikely(ctrl_req == NULL)) {
		SPDK_ERRLOG("%s: Controlq request at index %d points to unmapped guest memory.\n",
			    vdev->name, task->req_idx);
		goto out;
	}
324 
325 	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE,
326 		      "Processing controlq descriptor: desc %d/%p, desc_addr %p, len %d, flags %d, last_used_idx %d; kickfd %d; size %d\n",
327 		      task->req_idx, desc, (void *)desc->addr, desc->len, desc->flags, task->vq->vring.last_used_idx,
328 		      task->vq->vring.kickfd, task->vq->vring.size);
329 	SPDK_TRACEDUMP(SPDK_LOG_VHOST_SCSI_QUEUE, "Request descriptor", (uint8_t *)ctrl_req,
330 		       desc->len);
331 
332 	spdk_vhost_scsi_task_init_target(task, ctrl_req->lun);
333 
334 	spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_size);
335 	if (spdk_unlikely(desc == NULL)) {
336 		SPDK_ERRLOG("%s: No response descriptor for controlq request %d.\n",
337 			    vdev->name, task->req_idx);
338 		goto out;
339 	}
340 
341 	/* Process the control request (TMF or asynchronous notification) */
342 	switch (ctrl_req->type) {
343 	case VIRTIO_SCSI_T_TMF:
344 		task->tmf_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
345 		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_ctrl_tmf_resp) || task->tmf_resp == NULL)) {
346 			SPDK_ERRLOG("%s: TMF response descriptor at index %d points to invalid guest memory region\n",
347 				    vdev->name, task->req_idx);
348 			goto out;
349 		}
350 
351 		/* Check if we are processing a valid request */
352 		if (task->scsi_dev == NULL) {
353 			task->tmf_resp->response = VIRTIO_SCSI_S_BAD_TARGET;
354 			break;
355 		}
356 
357 		switch (ctrl_req->subtype) {
358 		case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
359 			/* Handle LUN reset */
360 			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE, "LUN reset\n");
361 
362 			mgmt_task_submit(task, SPDK_SCSI_TASK_FUNC_LUN_RESET);
363 			return;
364 		default:
365 			task->tmf_resp->response = VIRTIO_SCSI_S_ABORTED;
366 			/* Unsupported command */
367 			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE, "Unsupported TMF command %x\n", ctrl_req->subtype);
368 			break;
369 		}
370 		break;
371 	case VIRTIO_SCSI_T_AN_QUERY:
372 	case VIRTIO_SCSI_T_AN_SUBSCRIBE: {
373 		an_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
374 		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_ctrl_an_resp) || an_resp == NULL)) {
375 			SPDK_WARNLOG("%s: Asynchronous response descriptor points to invalid guest memory region\n",
376 				     vdev->name);
377 			goto out;
378 		}
379 
380 		an_resp->response = VIRTIO_SCSI_S_ABORTED;
381 		break;
382 	}
383 	default:
384 		SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE, "Unsupported control command %x\n", ctrl_req->type);
385 		break;
386 	}
387 
388 	used_len = sizeof(struct virtio_scsi_ctrl_tmf_resp);
389 out:
390 	spdk_vhost_vq_used_ring_enqueue(vdev, task->vq, task->req_idx, used_len);
391 	spdk_vhost_scsi_task_put(task);
392 }
393 
394 /*
395  * Process the task's descriptor chain and set up the data-related fields.
396  * Return
397  *   -1 if the request is invalid and must be aborted,
398  *    0 if all data fields are set.
399  */
400 static int
401 task_data_setup(struct spdk_vhost_scsi_task *task,
402 		struct virtio_scsi_cmd_req **req)
403 {
404 	struct spdk_vhost_dev *vdev = &task->svdev->vdev;
405 	struct vring_desc *desc, *desc_table;
406 	struct iovec *iovs = task->iovs;
407 	uint16_t iovcnt = 0;
408 	uint32_t desc_table_len, len = 0;
409 	int rc;
410 
411 	spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_cpl, spdk_vhost_scsi_task_free_cb);
412 
413 	rc = spdk_vhost_vq_get_desc(vdev, task->vq, task->req_idx, &desc, &desc_table, &desc_table_len);
414 	/* First descriptor must be readable */
415 	if (spdk_unlikely(rc != 0  || spdk_vhost_vring_desc_is_wr(desc) ||
416 			  desc->len < sizeof(struct virtio_scsi_cmd_req))) {
417 		SPDK_WARNLOG("%s: invalid first (request) descriptor at index %"PRIu16".\n",
418 			     vdev->name, task->req_idx);
419 		goto invalid_task;
420 	}
421 
422 	*req = spdk_vhost_gpa_to_vva(vdev, desc->addr);
423 	if (spdk_unlikely(*req == NULL)) {
424 		SPDK_WARNLOG("%s: Request descriptor at index %d points to invalid guest memory region\n",
425 			     vdev->name, task->req_idx);
426 		goto invalid_task;
427 	}
428 
429 	/* Each request must have at least 2 descriptors (i.e. the request and the response) */
430 	spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
431 	if (desc == NULL) {
432 		SPDK_WARNLOG("%s: Descriptor chain at index %d contains neither payload nor response buffer.\n",
433 			     vdev->name, task->req_idx);
434 		goto invalid_task;
435 	}
436 	task->scsi.dxfer_dir = spdk_vhost_vring_desc_is_wr(desc) ? SPDK_SCSI_DIR_FROM_DEV :
437 			       SPDK_SCSI_DIR_TO_DEV;
438 	task->scsi.iovs = iovs;
439 
440 	if (task->scsi.dxfer_dir == SPDK_SCSI_DIR_FROM_DEV) {
441 		/*
442 		 * FROM_DEV (READ): [RD_req][WR_resp][WR_buf0]...[WR_bufN]
443 		 */
444 		task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
445 		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_cmd_resp) || task->resp == NULL)) {
446 			SPDK_WARNLOG("%s: Response descriptor at index %d points to invalid guest memory region\n",
447 				     vdev->name, task->req_idx);
448 			goto invalid_task;
449 		}
450 		rc = spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
451 		if (spdk_unlikely(rc != 0)) {
452 			SPDK_WARNLOG("%s: invalid descriptor chain at request index %d (descriptor id overflow?).\n",
453 				     vdev->name, task->req_idx);
454 			goto invalid_task;
455 		}
456 
457 		if (desc == NULL) {
458 			/*
459 			 * TEST UNIT READY and some other commands might not carry any payload; this is not an error.
460 			 */
461 			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_DATA,
462 				      "No payload descriptors for FROM DEV command req_idx=%"PRIu16".\n", task->req_idx);
463 			SPDK_TRACEDUMP(SPDK_LOG_VHOST_SCSI_DATA, "CDB=", (*req)->cdb, VIRTIO_SCSI_CDB_SIZE);
464 			task->used_len = sizeof(struct virtio_scsi_cmd_resp);
465 			task->scsi.iovcnt = 1;
466 			task->scsi.iovs[0].iov_len = 0;
467 			task->scsi.length = 0;
468 			task->scsi.transfer_len = 0;
469 			return 0;
470 		}
471 
472 		/* All remaining descriptors are data. */
473 		while (desc) {
474 			if (spdk_unlikely(!spdk_vhost_vring_desc_is_wr(desc))) {
475 				SPDK_WARNLOG("FROM DEV cmd: descriptor nr %" PRIu16" in payload chain is read only.\n", iovcnt);
476 				goto invalid_task;
477 			}
478 
479 			if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
480 				goto invalid_task;
481 			}
482 			len += desc->len;
483 
484 			rc = spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
485 			if (spdk_unlikely(rc != 0)) {
486 				SPDK_WARNLOG("%s: invalid payload in descriptor chain starting at index %d.\n",
487 					     vdev->name, task->req_idx);
488 				goto invalid_task;
489 			}
490 		}
491 
492 		task->used_len = sizeof(struct virtio_scsi_cmd_resp) + len;
493 	} else {
494 		SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_DATA, "TO DEV\n");
495 		/*
496 		 * TO_DEV (WRITE): [RD_req][RD_buf0]...[RD_bufN][WR_resp]
497 		 * No need to check the descriptor WR flag as this is done while setting scsi.dxfer_dir.
498 		 */
499 
500 		/* Process descriptors up to response. */
501 		while (!spdk_vhost_vring_desc_is_wr(desc)) {
502 			if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
503 				goto invalid_task;
504 			}
505 			len += desc->len;
506 
507 			spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
508 			if (spdk_unlikely(desc == NULL)) {
509 				SPDK_WARNLOG("TO_DEV cmd: no response descriptor.\n");
510 				goto invalid_task;
511 			}
512 		}
513 
514 		task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
515 		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_cmd_resp) || task->resp == NULL)) {
516 			SPDK_WARNLOG("%s: Response descriptor at index %d points to invalid guest memory region\n",
517 				     vdev->name, task->req_idx);
518 			goto invalid_task;
519 		}
520 
521 		task->used_len = sizeof(struct virtio_scsi_cmd_resp);
522 	}
523 
524 	task->scsi.iovcnt = iovcnt;
525 	task->scsi.length = len;
526 	task->scsi.transfer_len = len;
527 	return 0;
528 
529 invalid_task:
530 	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_DATA, "%s: Invalid task at index %"PRIu16".\n",
531 		      vdev->name, task->req_idx);
532 	return -1;
533 }
534 
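/*
 * Set up a request queue task: parse the descriptor chain and resolve the
 * target. Returns 0 if the task should be submitted to the SCSI layer, a
 * positive value if it was already completed (e.g. NULL LUN), or -1 if the
 * request is invalid.
 */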
535 static int
536 process_request(struct spdk_vhost_scsi_task *task)
537 {
538 	struct virtio_scsi_cmd_req *req;
539 	int result;
540 
541 	result = task_data_setup(task, &req);
542 	if (result) {
543 		return result;
544 	}
545 
546 	result = spdk_vhost_scsi_task_init_target(task, req->lun);
547 	if (spdk_unlikely(result != 0)) {
548 		task->resp->response = VIRTIO_SCSI_S_BAD_TARGET;
549 		return -1;
550 	}
551 
552 	task->scsi.cdb = req->cdb;
553 	SPDK_TRACEDUMP(SPDK_LOG_VHOST_SCSI_DATA, "request CDB", req->cdb, VIRTIO_SCSI_CDB_SIZE);
554 
555 	if (spdk_unlikely(task->scsi.lun == NULL)) {
556 		spdk_scsi_task_process_null_lun(&task->scsi);
557 		task->resp->response = VIRTIO_SCSI_S_OK;
558 		return 1;
559 	}
560 
561 	return 0;
562 }
563 
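/*
 * Drain the control queue's avail ring, validating each entry before
 * handing it to process_ctrl_request().
 */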
564 static void
565 process_controlq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue *vq)
566 {
567 	struct spdk_vhost_scsi_task *task;
568 	uint16_t reqs[32];
569 	uint16_t reqs_cnt, i;
570 
571 	reqs_cnt = spdk_vhost_vq_avail_ring_get(vq, reqs, SPDK_COUNTOF(reqs));
572 	for (i = 0; i < reqs_cnt; i++) {
573 		if (spdk_unlikely(reqs[i] >= vq->vring.size)) {
574 			SPDK_ERRLOG("%s: invalid entry in avail ring. Buffer '%"PRIu16"' exceeds virtqueue size (%"PRIu16")\n",
575 				    svdev->vdev.name, reqs[i], vq->vring.size);
576 			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
577 			continue;
578 		}
579 
580 		task = &((struct spdk_vhost_scsi_task *)vq->tasks)[reqs[i]];
581 		if (spdk_unlikely(task->used)) {
582 			SPDK_ERRLOG("%s: invalid entry in avail ring. Buffer '%"PRIu16"' is still in use!\n",
583 				    svdev->vdev.name, reqs[i]);
584 			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
585 			continue;
586 		}
587 
588 		svdev->vdev.task_cnt++;
589 		memset(&task->scsi, 0, sizeof(task->scsi));
590 		task->tmf_resp = NULL;
591 		task->used = true;
592 		process_ctrl_request(task);
593 	}
594 }
595 
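/*
 * Drain an I/O queue's avail ring. Each valid entry is turned into a task
 * and either submitted to the SCSI layer, completed early, or failed back
 * to the guest as an invalid request.
 */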
596 static void
597 process_requestq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue *vq)
598 {
599 	struct spdk_vhost_scsi_task *task;
600 	uint16_t reqs[32];
601 	uint16_t reqs_cnt, i;
602 	int result;
603 
604 	reqs_cnt = spdk_vhost_vq_avail_ring_get(vq, reqs, SPDK_COUNTOF(reqs));
605 	assert(reqs_cnt <= 32);
606 
607 	for (i = 0; i < reqs_cnt; i++) {
608 		SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Starting processing request idx %"PRIu16"======\n",
609 			      reqs[i]);
610 
611 		if (spdk_unlikely(reqs[i] >= vq->vring.size)) {
612 			SPDK_ERRLOG("%s: request idx '%"PRIu16"' exceeds virtqueue size (%"PRIu16").\n",
613 				    svdev->vdev.name, reqs[i], vq->vring.size);
614 			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
615 			continue;
616 		}
617 
618 		task = &((struct spdk_vhost_scsi_task *)vq->tasks)[reqs[i]];
619 		if (spdk_unlikely(task->used)) {
620 			SPDK_ERRLOG("%s: request with idx '%"PRIu16"' is already pending.\n",
621 				    svdev->vdev.name, reqs[i]);
622 			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
623 			continue;
624 		}
625 
626 		svdev->vdev.task_cnt++;
627 		memset(&task->scsi, 0, sizeof(task->scsi));
628 		task->resp = NULL;
629 		task->used = true;
630 		task->used_len = 0;
631 		result = process_request(task);
632 		if (likely(result == 0)) {
633 			task_submit(task);
634 			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Task %p req_idx %d submitted ======\n", task,
635 				      task->req_idx);
636 		} else if (result > 0) {
637 			spdk_vhost_scsi_task_cpl(&task->scsi);
638 			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Task %p req_idx %d finished early ======\n", task,
639 				      task->req_idx);
640 		} else {
641 			invalid_request(task);
642 			SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "====== Task %p req_idx %d failed ======\n", task,
643 				      task->req_idx);
644 		}
645 	}
646 }
647 
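/*
 * Management poller (runs every MGMT_POLL_PERIOD_US): finalizes pending
 * hot-detach operations and services the event and control queues.
 */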
648 static void
649 vdev_mgmt_worker(void *arg)
650 {
651 	struct spdk_vhost_scsi_dev *svdev = arg;
652 
653 	process_removed_devs(svdev);
654 	spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_EVENTQ]);
655 
656 	process_controlq(svdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_CONTROLQ]);
657 	spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_CONTROLQ]);
658 }
659 
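/* I/O poller: services all request queues of the controller. */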
660 static void
661 vdev_worker(void *arg)
662 {
663 	struct spdk_vhost_scsi_dev *svdev = arg;
664 	uint32_t q_idx;
665 
666 	for (q_idx = VIRTIO_SCSI_REQUESTQ; q_idx < svdev->vdev.num_queues; q_idx++) {
667 		process_requestq(svdev, &svdev->vdev.virtqueue[q_idx]);
668 	}
669 
670 	spdk_vhost_dev_used_signal(&svdev->vdev);
671 }
672 
673 static struct spdk_vhost_scsi_dev *
674 to_scsi_dev(struct spdk_vhost_dev *ctrlr)
675 {
676 	if (ctrlr == NULL) {
677 		return NULL;
678 	}
679 
680 	if (ctrlr->backend != &spdk_vhost_scsi_device_backend) {
681 		SPDK_ERRLOG("%s: not a vhost-scsi device.\n", ctrlr->name);
682 		return NULL;
683 	}
684 
685 	return SPDK_CONTAINEROF(ctrlr, struct spdk_vhost_scsi_dev, vdev);
686 }
687 
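/*
 * Create an empty vhost-scsi controller and register it with the vhost
 * library under the given name and CPU mask.
 */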
688 int
689 spdk_vhost_scsi_dev_construct(const char *name, const char *cpumask)
690 {
691 	struct spdk_vhost_scsi_dev *svdev = spdk_dma_zmalloc(sizeof(struct spdk_vhost_scsi_dev),
692 					    SPDK_CACHE_LINE_SIZE, NULL);
693 	int rc;
694 
695 	if (svdev == NULL) {
696 		return -ENOMEM;
697 	}
698 
699 	spdk_vhost_lock();
700 	rc = spdk_vhost_dev_register(&svdev->vdev, name, cpumask,
701 				     &spdk_vhost_scsi_device_backend);
702 
703 	if (rc) {
704 		spdk_dma_free(svdev);
705 	}
706 
707 	spdk_vhost_unlock();
708 	return rc;
709 }
710 
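/*
 * Backend remove callback. A controller that still exposes targets cannot
 * be removed while registered; otherwise any leftover targets are
 * force-removed before the vhost device is unregistered and freed.
 */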
711 static int
712 spdk_vhost_scsi_dev_remove(struct spdk_vhost_dev *vdev)
713 {
714 	struct spdk_vhost_scsi_dev *svdev = to_scsi_dev(vdev);
715 	int rc, i;
716 
717 	if (svdev == NULL) {
718 		return -EINVAL;
719 	}
720 
721 	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; ++i) {
722 		if (svdev->scsi_dev[i]) {
723 			if (vdev->registered) {
724 				SPDK_ERRLOG("Trying to remove non-empty controller: %s.\n", vdev->name);
725 				return -EBUSY;
726 			}
727 
728 			rc = spdk_vhost_scsi_dev_remove_tgt(vdev, i, NULL, NULL);
729 			if (rc != 0) {
730 				SPDK_ERRLOG("%s: failed to force-remove target %d\n", vdev->name, i);
731 				return rc;
732 			}
733 		}
734 	}
735 
736 	rc = spdk_vhost_dev_unregister(vdev);
737 	if (rc != 0) {
738 		return rc;
739 	}
740 
741 	spdk_dma_free(svdev);
742 	return 0;
743 }
744 
745 struct spdk_scsi_dev *
746 spdk_vhost_scsi_dev_get_tgt(struct spdk_vhost_dev *vdev, uint8_t num)
747 {
748 	struct spdk_vhost_scsi_dev *svdev;
749 
750 	assert(num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
751 	svdev = to_scsi_dev(vdev);
752 
753 	return svdev ? svdev->scsi_dev[num] : NULL;
754 }
755 
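/*
 * Hot-remove callback registered with the SCSI layer. Each target exposes
 * a single LUN, so losing that LUN hot-detaches the whole target.
 */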
756 static void
757 spdk_vhost_scsi_lun_hotremove(const struct spdk_scsi_lun *lun, void *arg)
758 {
759 	struct spdk_vhost_scsi_dev *svdev = arg;
760 	const struct spdk_scsi_dev *scsi_dev;
761 	unsigned scsi_dev_num;
762 
763 	assert(lun != NULL);
764 	assert(svdev != NULL);
765 	if (svdev->vdev.lcore != -1 &&
766 	    !spdk_vhost_dev_has_feature(&svdev->vdev, VIRTIO_SCSI_F_HOTPLUG)) {
767 		SPDK_WARNLOG("%s: hotremove is not enabled for this controller.\n", svdev->vdev.name);
768 		return;
769 	}
770 
771 	scsi_dev = spdk_scsi_lun_get_dev(lun);
772 	for (scsi_dev_num = 0; scsi_dev_num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; scsi_dev_num++) {
773 		if (svdev->scsi_dev[scsi_dev_num] == scsi_dev) {
774 			break;
775 		}
776 	}
777 
778 	if (scsi_dev_num == SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
779 		/* The entire device has already been removed. */
780 		return;
781 	}
782 
783 	/* remove the entire target */
784 	spdk_vhost_scsi_dev_remove_tgt(&svdev->vdev, scsi_dev_num, NULL, NULL);
785 }
786 
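/*
 * Attach a bdev as LUN 0 of the given target. If the controller is already
 * in use by a guest, VIRTIO_SCSI_F_HOTPLUG must have been negotiated and a
 * rescan event is pushed to the event queue.
 */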
787 int
788 spdk_vhost_scsi_dev_add_tgt(struct spdk_vhost_dev *vdev, unsigned scsi_tgt_num,
789 			    const char *bdev_name)
790 {
791 	struct spdk_vhost_scsi_dev *svdev;
792 	char target_name[SPDK_SCSI_DEV_MAX_NAME];
793 	int lun_id_list[1];
794 	const char *bdev_names_list[1];
795 
796 	svdev = to_scsi_dev(vdev);
797 	if (svdev == NULL) {
798 		return -EINVAL;
799 	}
800 
801 	if (scsi_tgt_num >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
802 		SPDK_ERRLOG("Controller %s: target number %u is too big (max %d)\n", vdev->name,
803 			    scsi_tgt_num, SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
804 		return -EINVAL;
805 	}
806 
807 	if (bdev_name == NULL) {
808 		SPDK_ERRLOG("No bdev name specified\n");
809 		return -EINVAL;
810 	}
811 
812 	if (vdev->lcore != -1 && !spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
813 		SPDK_ERRLOG("Controller %s is in use and hotplug is not supported\n", vdev->name);
814 		return -ENOTSUP;
815 	}
816 
817 	if (svdev->scsi_dev[scsi_tgt_num] != NULL) {
818 		SPDK_ERRLOG("Controller %s target %u already occupied\n", vdev->name, scsi_tgt_num);
819 		return -EEXIST;
820 	}
821 
822 	/*
823 	 * At this stage only one LUN per target is supported
824 	 */
825 	snprintf(target_name, sizeof(target_name), "Target %u", scsi_tgt_num);
826 	lun_id_list[0] = 0;
827 	bdev_names_list[0] = (char *)bdev_name;
828 
829 	svdev->scsi_dev_state[scsi_tgt_num].removed = false;
830 	svdev->scsi_dev[scsi_tgt_num] = spdk_scsi_dev_construct(target_name, bdev_names_list, lun_id_list,
831 					1,
832 					SPDK_SPC_PROTOCOL_IDENTIFIER_SAS, spdk_vhost_scsi_lun_hotremove, svdev);
833 
834 	if (svdev->scsi_dev[scsi_tgt_num] == NULL) {
835 		SPDK_ERRLOG("Couldn't create SPDK SCSI target '%s' using bdev '%s' in controller %s\n",
836 			    target_name, bdev_name, vdev->name);
837 		return -EINVAL;
838 	}
839 	spdk_scsi_dev_add_port(svdev->scsi_dev[scsi_tgt_num], 0, "vhost");
840 
841 	if (vdev->lcore != -1) {
842 		spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[scsi_tgt_num]);
843 		eventq_enqueue(svdev, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_RESCAN);
844 	}
845 
846 	SPDK_NOTICELOG("Controller %s: defined target '%s' using bdev '%s'\n",
847 		       vdev->name, target_name, bdev_name);
848 	return 0;
849 }
850 
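/*
 * Detach a target. On an idle controller the target is released
 * synchronously; on an active one it is only marked as removed and the
 * management poller completes the hot-detach once all I/O has drained.
 */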
851 int
852 spdk_vhost_scsi_dev_remove_tgt(struct spdk_vhost_dev *vdev, unsigned scsi_tgt_num,
853 			       spdk_vhost_event_fn cb_fn, void *cb_arg)
854 {
855 	struct spdk_vhost_scsi_dev *svdev;
856 	struct spdk_scsi_dev *scsi_dev;
857 	struct spdk_scsi_dev_vhost_state *scsi_dev_state;
858 	int rc = 0;
859 
860 	if (scsi_tgt_num >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
861 		SPDK_ERRLOG("%s: invalid target number %d\n", vdev->name, scsi_tgt_num);
862 		return -EINVAL;
863 	}
864 
865 	svdev = to_scsi_dev(vdev);
866 	if (svdev == NULL) {
867 		return -ENODEV;
868 	}
869 
870 	scsi_dev = svdev->scsi_dev[scsi_tgt_num];
871 	if (scsi_dev == NULL) {
872 		SPDK_ERRLOG("Controller %s target %u is not occupied\n", vdev->name, scsi_tgt_num);
873 		return -ENODEV;
874 	}
875 
876 	if (svdev->vdev.lcore == -1) {
877 		/* controller is not in use, remove dev and exit */
878 		svdev->scsi_dev[scsi_tgt_num] = NULL;
879 		spdk_scsi_dev_destruct(scsi_dev);
880 		if (cb_fn) {
881 			rc = cb_fn(vdev, cb_arg);
882 		}
883 		SPDK_NOTICELOG("%s: removed target 'Target %u'\n", vdev->name, scsi_tgt_num);
884 		return rc;
885 	}
886 
887 	if (!spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
888 		SPDK_WARNLOG("%s: 'Target %u' is in use and hot-detach is not enabled for this controller.\n",
889 			     svdev->vdev.name, scsi_tgt_num);
890 		return -ENOTSUP;
891 	}
892 
893 	scsi_dev_state = &svdev->scsi_dev_state[scsi_tgt_num];
894 	if (scsi_dev_state->removed) {
895 		SPDK_WARNLOG("%s: 'Target %u' has already been marked for hot-removal.\n", svdev->vdev.name,
896 			     scsi_tgt_num);
897 		return -EBUSY;
898 	}
899 
900 	scsi_dev_state->remove_cb = cb_fn;
901 	scsi_dev_state->remove_ctx = cb_arg;
902 	scsi_dev_state->removed = true;
903 	eventq_enqueue(svdev, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_REMOVED);
904 
905 	SPDK_NOTICELOG("%s: queued 'Target %u' for hot-detach.\n", vdev->name, scsi_tgt_num);
906 	return 0;
907 }
908 
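/*
 * Legacy config file support: create a controller for each [VhostScsiX]
 * section and add the targets listed under the 'Target' (or deprecated
 * 'Dev') keyword.
 */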
909 int
910 spdk_vhost_scsi_controller_construct(void)
911 {
912 	struct spdk_conf_section *sp = spdk_conf_first_section(NULL);
913 	struct spdk_vhost_dev *vdev;
914 	int i, dev_num;
915 	unsigned ctrlr_num = 0;
916 	char *bdev_name, *tgt_num_str;
917 	char *cpumask;
918 	char *name;
919 	char *keyword;
920 	char *dev = NULL, *tgt = NULL;
921 
922 	while (sp != NULL) {
923 		if (!spdk_conf_section_match_prefix(sp, "VhostScsi")) {
924 			sp = spdk_conf_next_section(sp);
925 			continue;
926 		}
927 
928 		if (sscanf(spdk_conf_section_get_name(sp), "VhostScsi%u", &ctrlr_num) != 1) {
929 			SPDK_ERRLOG("Section '%s' has non-numeric suffix.\n",
930 				    spdk_conf_section_get_name(sp));
931 			return -1;
932 		}
933 
934 		name = spdk_conf_section_get_val(sp, "Name");
935 		cpumask = spdk_conf_section_get_val(sp, "Cpumask");
936 
937 		if (spdk_vhost_scsi_dev_construct(name, cpumask) < 0) {
938 			return -1;
939 		}
940 
941 		vdev = spdk_vhost_dev_find(name);
942 		assert(vdev);
943 
944 		dev = spdk_conf_section_get_nval(sp, "Dev", 0);
945 		tgt = spdk_conf_section_get_nval(sp, "Target", 0);
946 
947 		if (dev && tgt) {
948 			SPDK_ERRLOG("Used both 'Dev' and 'Target' keywords in section [VhostScsi%u]\n"
949 				    "Please use only one of them.\n", ctrlr_num);
950 			return -1;
951 		} else if (dev) {
952 			SPDK_NOTICELOG("The 'Dev' keyword is deprecated and will be removed shortly.\n"
953 				       "Please use 'Target' instead.\n");
954 			keyword = "Dev";
955 		} else {
956 			keyword = "Target";
957 		}
958 
959 		for (i = 0; ; i++) {
960 
961 			tgt = spdk_conf_section_get_nval(sp, keyword, i);
962 			if (tgt == NULL) {
963 				break;
964 			}
965 
966 			tgt_num_str = spdk_conf_section_get_nmval(sp, keyword, i, 0);
967 			if (tgt_num_str == NULL) {
968 				SPDK_ERRLOG("%s: Invalid or missing target number\n", name);
969 				return -1;
970 			}
971 
972 			dev_num = (int)strtol(tgt_num_str, NULL, 10);
973 			bdev_name = spdk_conf_section_get_nmval(sp, keyword, i, 1);
974 			if (bdev_name == NULL) {
975 				SPDK_ERRLOG("%s: Invalid or missing bdev name for target %d\n", name, dev_num);
976 				return -1;
977 			} else if (spdk_conf_section_get_nmval(sp, keyword, i, 2)) {
978 				SPDK_ERRLOG("%s: Only one LUN per vhost SCSI device supported\n", name);
979 				return -1;
980 			}
981 
982 			if (spdk_vhost_scsi_dev_add_tgt(vdev, dev_num, bdev_name) < 0) {
983 				return -1;
984 			}
985 		}
986 
987 		sp = spdk_conf_next_section(sp);
988 	}
989 
990 	return 0;
991 }
992 
993 static void
994 free_task_pool(struct spdk_vhost_scsi_dev *svdev)
995 {
996 	struct spdk_vhost_virtqueue *vq;
997 	uint16_t i;
998 
999 	for (i = 0; i < svdev->vdev.num_queues; i++) {
1000 		vq = &svdev->vdev.virtqueue[i];
1001 		if (vq->tasks == NULL) {
1002 			continue;
1003 		}
1004 
1005 		spdk_dma_free(vq->tasks);
1006 		vq->tasks = NULL;
1007 	}
1008 }
1009 
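/*
 * Pre-allocate one task per virtqueue descriptor so the I/O path never
 * allocates memory.
 */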
1010 static int
1011 alloc_task_pool(struct spdk_vhost_scsi_dev *svdev)
1012 {
1013 	struct spdk_vhost_virtqueue *vq;
1014 	struct spdk_vhost_scsi_task *task;
1015 	uint32_t task_cnt;
1016 	uint16_t i;
1017 	uint32_t j;
1018 
1019 	for (i = 0; i < svdev->vdev.num_queues; i++) {
1020 		vq = &svdev->vdev.virtqueue[i];
1021 		task_cnt = vq->vring.size;
1022 		if (task_cnt > SPDK_VHOST_MAX_VQ_SIZE) {
1023 			/* sanity check */
1024 			SPDK_ERRLOG("Controller %s: virtqueue %"PRIu16" is too big (size = %"PRIu32", max = %"PRIu32")\n",
1025 				    svdev->vdev.name, i, task_cnt, SPDK_VHOST_MAX_VQ_SIZE);
1026 			free_task_pool(svdev);
1027 			return -1;
1028 		}
1029 		vq->tasks = spdk_dma_zmalloc(sizeof(struct spdk_vhost_scsi_task) * task_cnt,
1030 					     SPDK_CACHE_LINE_SIZE, NULL);
1031 		if (vq->tasks == NULL) {
1032 			SPDK_ERRLOG("Controller %s: failed to allocate %"PRIu32" tasks for virtqueue %"PRIu16"\n",
1033 				    svdev->vdev.name, task_cnt, i);
1034 			free_task_pool(svdev);
1035 			return -1;
1036 		}
1037 
1038 		for (j = 0; j < task_cnt; j++) {
1039 			task = &((struct spdk_vhost_scsi_task *)vq->tasks)[j];
1040 			task->svdev = svdev;
1041 			task->vq = vq;
1042 			task->req_idx = j;
1043 		}
1044 	}
1045 
1046 	return 0;
1047 }
1048 
1049 /*
1050  * Backend start callback: a device is being assigned to a data core. Allocate the per-virtqueue
1051  * task pools, open I/O channels for every target and register the request and management pollers.
1052  */
1053 static int
1054 spdk_vhost_scsi_start(struct spdk_vhost_dev *vdev, void *event_ctx)
1055 {
1056 	struct spdk_vhost_scsi_dev *svdev;
1057 	uint32_t i;
1058 	int rc;
1059 
1060 	svdev = to_scsi_dev(vdev);
1061 	if (svdev == NULL) {
1062 		SPDK_ERRLOG("Trying to start a non-SCSI controller as a SCSI one.\n");
1063 		rc = -1;
1064 		goto out;
1065 	}
1066 
1067 	rc = alloc_task_pool(svdev);
1068 	if (rc != 0) {
1069 		SPDK_ERRLOG("%s: failed to alloc task pool.\n", vdev->name);
1070 		goto out;
1071 	}
1072 
1073 	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) {
1074 		if (svdev->scsi_dev[i] == NULL) {
1075 			continue;
1076 		}
1077 		spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[i]);
1078 	}
1079 	SPDK_NOTICELOG("Started poller for vhost controller %s on lcore %d\n", vdev->name, vdev->lcore);
1080 
1081 	spdk_vhost_dev_mem_register(vdev);
1082 
1083 	svdev->requestq_poller = spdk_poller_register(vdev_worker, svdev, 0);
1084 	svdev->mgmt_poller = spdk_poller_register(vdev_mgmt_worker, svdev,
1085 			     MGMT_POLL_PERIOD_US);
1086 out:
1087 	spdk_vhost_dev_backend_event_done(event_ctx, rc);
1088 	return rc;
1089 }
1090 
1091 struct spdk_vhost_dev_destroy_ctx {
1092 	struct spdk_vhost_scsi_dev *svdev;
1093 	struct spdk_poller *poller;
1094 	void *event_ctx;
1095 };
1096 
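/*
 * Polled during shutdown until all outstanding tasks complete, then
 * releases I/O channels, guest memory registrations and the task pools.
 */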
1097 static void
1098 destroy_device_poller_cb(void *arg)
1099 {
1100 	struct spdk_vhost_dev_destroy_ctx *ctx = arg;
1101 	struct spdk_vhost_scsi_dev *svdev = ctx->svdev;
1102 	uint32_t i;
1103 
1104 	if (svdev->vdev.task_cnt > 0) {
1105 		return;
1106 	}
1107 
1108 
1109 	for (i = 0; i < svdev->vdev.num_queues; i++) {
1110 		spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[i]);
1111 	}
1112 
1113 	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) {
1114 		if (svdev->scsi_dev[i] == NULL) {
1115 			continue;
1116 		}
1117 		spdk_scsi_dev_free_io_channels(svdev->scsi_dev[i]);
1118 	}
1119 
1120 	SPDK_NOTICELOG("Stopping poller for vhost controller %s\n", svdev->vdev.name);
1121 	spdk_vhost_dev_mem_unregister(&svdev->vdev);
1122 
1123 	free_task_pool(svdev);
1124 
1125 	spdk_poller_unregister(&ctx->poller);
1126 	spdk_vhost_dev_backend_event_done(ctx->event_ctx, 0);
1127 	spdk_dma_free(ctx);
1128 }
1129 
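/*
 * Backend stop callback: unregister the data path pollers and let
 * destroy_device_poller_cb tear the device down once in-flight I/O drains.
 */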
1130 static int
1131 spdk_vhost_scsi_stop(struct spdk_vhost_dev *vdev, void *event_ctx)
1132 {
1133 	struct spdk_vhost_scsi_dev *svdev;
1134 	struct spdk_vhost_dev_destroy_ctx *destroy_ctx;
1135 
1136 	svdev = to_scsi_dev(vdev);
1137 	if (svdev == NULL) {
1138 		SPDK_ERRLOG("Trying to stop a non-SCSI controller as a SCSI one.\n");
1139 		goto err;
1140 	}
1141 
1142 	destroy_ctx = spdk_dma_zmalloc(sizeof(*destroy_ctx), SPDK_CACHE_LINE_SIZE, NULL);
1143 	if (destroy_ctx == NULL) {
1144 		SPDK_ERRLOG("Failed to alloc memory for destroying device.\n");
1145 		goto err;
1146 	}
1147 
1148 	destroy_ctx->svdev = svdev;
1149 	destroy_ctx->event_ctx = event_ctx;
1150 
1151 	spdk_poller_unregister(&svdev->requestq_poller);
1152 	spdk_poller_unregister(&svdev->mgmt_poller);
1153 	destroy_ctx->poller = spdk_poller_register(destroy_device_poller_cb, destroy_ctx,
1154 			      1000);
1155 
1156 	return 0;
1157 
1158 err:
1159 	spdk_vhost_dev_backend_event_done(event_ctx, -1);
1160 	return -1;
1161 }
1162 
1163 static void
1164 spdk_vhost_scsi_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
1165 {
1166 	struct spdk_scsi_dev *sdev;
1167 	struct spdk_scsi_lun *lun;
1168 	uint32_t dev_idx;
1169 	uint32_t lun_idx;
1170 
1171 	assert(vdev != NULL);
1172 	spdk_json_write_name(w, "scsi");
1173 	spdk_json_write_array_begin(w);
1174 	for (dev_idx = 0; dev_idx < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; dev_idx++) {
1175 		sdev = spdk_vhost_scsi_dev_get_tgt(vdev, dev_idx);
1176 		if (!sdev) {
1177 			continue;
1178 		}
1179 
1180 		spdk_json_write_object_begin(w);
1181 
1182 		spdk_json_write_name(w, "scsi_dev_num");
1183 		spdk_json_write_uint32(w, dev_idx);
1184 
1185 		spdk_json_write_name(w, "id");
1186 		spdk_json_write_int32(w, spdk_scsi_dev_get_id(sdev));
1187 
1188 		spdk_json_write_name(w, "target_name");
1189 		spdk_json_write_string(w, spdk_scsi_dev_get_name(sdev));
1190 
1191 		spdk_json_write_name(w, "luns");
1192 		spdk_json_write_array_begin(w);
1193 
1194 		for (lun_idx = 0; lun_idx < SPDK_SCSI_DEV_MAX_LUN; lun_idx++) {
1195 			lun = spdk_scsi_dev_get_lun(sdev, lun_idx);
1196 			if (!lun) {
1197 				continue;
1198 			}
1199 
1200 			spdk_json_write_object_begin(w);
1201 
1202 			spdk_json_write_name(w, "id");
1203 			spdk_json_write_int32(w, spdk_scsi_lun_get_id(lun));
1204 
1205 			spdk_json_write_name(w, "bdev_name");
1206 			spdk_json_write_string(w, spdk_scsi_lun_get_bdev_name(lun));
1207 
1208 			spdk_json_write_object_end(w);
1209 		}
1210 
1211 		spdk_json_write_array_end(w);
1212 		spdk_json_write_object_end(w);
1213 	}
1214 
1215 	spdk_json_write_array_end(w);
1216 }
1217 
1218 SPDK_LOG_REGISTER_COMPONENT("vhost_scsi", SPDK_LOG_VHOST_SCSI)
1219 SPDK_LOG_REGISTER_COMPONENT("vhost_scsi_queue", SPDK_LOG_VHOST_SCSI_QUEUE)
1220 SPDK_LOG_REGISTER_COMPONENT("vhost_scsi_data", SPDK_LOG_VHOST_SCSI_DATA)
1221