xref: /spdk/lib/vhost/vhost_scsi.c (revision 7e846d2bb99838a21b042dd2db1d0e36eb17f95c)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include <linux/virtio_scsi.h>
37 
38 #include "spdk/env.h"
39 #include "spdk/scsi.h"
40 #include "spdk/scsi_spec.h"
41 #include "spdk/conf.h"
42 #include "spdk/event.h"
43 #include "spdk/util.h"
44 #include "spdk/likely.h"
45 
46 #include "spdk/vhost.h"
47 #include "vhost_internal.h"
48 
/* Features supported by SPDK VHOST lib. */
#define SPDK_VHOST_SCSI_FEATURES	(SPDK_VHOST_FEATURES | \
					(1ULL << VIRTIO_SCSI_F_INOUT) | \
					(1ULL << VIRTIO_SCSI_F_HOTPLUG) | \
					(1ULL << VIRTIO_SCSI_F_CHANGE ) | \
					(1ULL << VIRTIO_SCSI_F_T10_PI ))

/* Features specified by VIRTIO SCSI but currently not supported:
 * - live migration
 * - T10 protection information (T10_PI)
 * NOTE(review): VIRTIO_SCSI_F_T10_PI appears both in the supported mask above
 * and in the disabled mask below - presumably the disabled mask takes
 * precedence during negotiation; confirm against the generic vhost layer.
 */
#define SPDK_VHOST_SCSI_DISABLED_FEATURES	(SPDK_VHOST_DISABLED_FEATURES | \
						(1ULL << VIRTIO_SCSI_F_T10_PI ))

/* Period of the management poller (controlq + hot-detach reaping): 5 ms. */
#define MGMT_POLL_PERIOD_US (1000 * 5)

/* Fixed virtio-scsi virtqueue indices: queue 0 is the control queue,
 * queue 1 the event queue, queues 2..n are request (I/O) queues.
 */
#define VIRTIO_SCSI_CONTROLQ   0
#define VIRTIO_SCSI_EVENTQ   1
#define VIRTIO_SCSI_REQUESTQ   2
68 
/* Hot-detach bookkeeping for one target slot of a vhost-scsi controller. */
struct spdk_scsi_dev_vhost_state {
	bool removed;			/* target was queued for hot-detach; reaped by process_removed_devs() */
	spdk_vhost_event_fn remove_cb;	/* invoked once the detached device is actually destructed */
	void *remove_ctx;		/* opaque argument for remove_cb */
};
74 
/* SCSI-specific vhost controller. Embeds the generic spdk_vhost_dev as its
 * first member so the two can be cast back and forth (see to_scsi_dev()).
 */
struct spdk_vhost_scsi_dev {
	struct spdk_vhost_dev vdev;	/* must remain the first member (enables casting) */
	struct spdk_scsi_dev *scsi_dev[SPDK_VHOST_SCSI_CTRLR_MAX_DEVS];		/* attached targets; NULL = free slot */
	struct spdk_scsi_dev_vhost_state scsi_dev_state[SPDK_VHOST_SCSI_CTRLR_MAX_DEVS];	/* per-slot hot-detach state */

	struct spdk_ring *task_pool;		/* pre-allocated spdk_vhost_scsi_task objects */
	struct spdk_poller *requestq_poller;	/* polls I/O queues (vdev_worker) */
	struct spdk_poller *mgmt_poller;	/* polls controlq and reaps removed devs (vdev_mgmt_worker) */
} __rte_cache_aligned;
84 
/* Per-request context: wraps a generic spdk_scsi_task together with the
 * virtio descriptor/response bookkeeping needed to complete the request.
 */
struct spdk_vhost_scsi_task {
	struct spdk_scsi_task	scsi;	/* generic SCSI layer task; must allow SPDK_CONTAINEROF() back to this struct */
	struct iovec iovs[SPDK_VHOST_IOVS_MAX];	/* data buffers translated from the guest descriptor chain */

	/* Response buffer inside guest memory; which member is used depends on
	 * whether this is an I/O request (resp) or a TMF request (tmf_resp).
	 */
	union {
		struct virtio_scsi_cmd_resp *resp;
		struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	};

	struct spdk_vhost_scsi_dev *svdev;	/* owning controller */
	struct spdk_scsi_dev *scsi_dev;		/* resolved target device (NULL if bad/removed) */

	int req_idx;	/* index of the head descriptor in the virtqueue */

	struct rte_vhost_vring *vq;	/* virtqueue this request arrived on */
};
101 
102 static int new_device(struct spdk_vhost_dev *, void *);
103 static int destroy_device(struct spdk_vhost_dev *, void *);
104 static void spdk_vhost_scsi_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
105 
/* Backend ops table registered with the generic vhost layer for SCSI controllers. */
const struct spdk_vhost_dev_backend spdk_vhost_scsi_device_backend = {
	.virtio_features = SPDK_VHOST_SCSI_FEATURES,
	.disabled_features = SPDK_VHOST_SCSI_DISABLED_FEATURES,
	.new_device =  new_device,
	.destroy_device = destroy_device,
	.dump_config_json = spdk_vhost_scsi_config_json,
	.vhost_remove_controller = spdk_vhost_scsi_dev_remove,
};
114 
/* Release one reference on the task; when the refcount drops to zero the
 * SCSI layer invokes spdk_vhost_scsi_task_free_cb(), which returns the task
 * to the controller's task pool.
 */
static void
spdk_vhost_scsi_task_put(struct spdk_vhost_scsi_task *task)
{
	spdk_scsi_task_put(&task->scsi);
}
120 
/* Free callback registered with spdk_scsi_task_construct(): returns the task
 * to the per-controller pool and drops the in-flight task count used by the
 * destroy path to wait for quiescence.
 */
static void
spdk_vhost_scsi_task_free_cb(struct spdk_scsi_task *scsi_task)
{
	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);

	assert(task->svdev->vdev.task_cnt > 0);
	task->svdev->vdev.task_cnt--;
	spdk_ring_enqueue(task->svdev->task_pool, (void **) &task, 1);
}
130 
131 static void
132 spdk_vhost_get_tasks(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_scsi_task **tasks,
133 		     size_t count)
134 {
135 	size_t res_count;
136 
137 	res_count = spdk_ring_dequeue(svdev->task_pool, (void **)tasks, count);
138 	if (res_count != count) {
139 		SPDK_ERRLOG("%s: couldn't get %zu tasks from task_pool\n", svdev->vdev.name, count);
140 		/* FIXME: we should never run out of tasks, but what if we do? */
141 		abort();
142 	}
143 
144 	assert(svdev->vdev.task_cnt <= INT_MAX - (int) res_count);
145 	svdev->vdev.task_cnt += res_count;
146 }
147 
148 static void
149 process_removed_devs(struct spdk_vhost_scsi_dev *svdev)
150 {
151 	struct spdk_scsi_dev *dev;
152 	struct spdk_scsi_dev_vhost_state *state;
153 	int i;
154 
155 	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; ++i) {
156 		dev = svdev->scsi_dev[i];
157 		state = &svdev->scsi_dev_state[i];
158 
159 		if (dev && state->removed && !spdk_scsi_dev_has_pending_tasks(dev)) {
160 			spdk_scsi_dev_free_io_channels(dev);
161 			spdk_scsi_dev_destruct(dev);
162 			svdev->scsi_dev[i] = NULL;
163 			if (state->remove_cb) {
164 				state->remove_cb(&svdev->vdev, state->remove_ctx);
165 				state->remove_cb = NULL;
166 			}
167 			SPDK_NOTICELOG("%s: hot-detached device 'Dev %u'.\n", svdev->vdev.name, i);
168 		}
169 	}
170 }
171 
/* Post a virtio-scsi event (e.g. VIRTIO_SCSI_T_TRANSPORT_RESET) for target
 * 'scsi_dev_num' onto the guest-visible event queue. Best-effort: if no avail
 * descriptor is ready, or the descriptor is invalid, the event is dropped
 * (with an error log) rather than retried.
 */
static void
eventq_enqueue(struct spdk_vhost_scsi_dev *svdev, unsigned scsi_dev_num, uint32_t event,
	       uint32_t reason)
{
	struct rte_vhost_vring *vq;
	struct vring_desc *desc;
	struct virtio_scsi_event *desc_ev;
	uint32_t req_size;
	uint16_t req;

	assert(scsi_dev_num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);

	vq = &svdev->vdev.virtqueue[VIRTIO_SCSI_EVENTQ];

	/* Grab exactly one guest-provided buffer from the eventq avail ring. */
	if (spdk_vhost_vq_avail_ring_get(vq, &req, 1) != 1) {
		SPDK_ERRLOG("Controller %s: Failed to send virtio event (no avail ring entries?).\n",
			    svdev->vdev.name);
		return;
	}

	desc =  spdk_vhost_vq_get_desc(vq, req);
	desc_ev = spdk_vhost_gpa_to_vva(&svdev->vdev, desc->addr);

	/* Validate the buffer before writing; on failure complete the
	 * descriptor with 0 used bytes so the guest can reclaim it.
	 */
	if (desc->len < sizeof(*desc_ev) || desc_ev == NULL) {
		SPDK_ERRLOG("Controller %s: Invalid eventq descriptor.\n", svdev->vdev.name);
		req_size = 0;
	} else {
		desc_ev->event = event;
		desc_ev->lun[0] = 1;
		desc_ev->lun[1] = scsi_dev_num;
		/* virtio LUN id 0 can refer either to the entire device
		 * or actual LUN 0 (the only supported by vhost for now)
		 */
		desc_ev->lun[2] = 0 >> 8;
		desc_ev->lun[3] = 0 & 0xFF;
		/* virtio doesn't specify any strict format for LUN id (bytes 2 and 3)
		 * current implementation relies on linux kernel sources
		 */
		memset(&desc_ev->lun[4], 0, 4);
		desc_ev->reason = reason;
		req_size = sizeof(*desc_ev);
	}

	/* Hand the (possibly zero-length) buffer back via the used ring. */
	spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, req, req_size);
}
217 
/* Complete a request toward the guest: put the head descriptor on the used
 * ring with the number of bytes transferred, then drop our task reference.
 */
static void
submit_completion(struct spdk_vhost_scsi_task *task)
{
	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx,
					task->scsi.data_transferred);
	SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI, "Finished task (%p) req_idx=%d\n", task, task->req_idx);

	spdk_vhost_scsi_task_put(task);
}
227 
/* Completion callback for management (TMF) tasks; the response code was
 * already written by mgmt_task_submit(), so just complete to the guest.
 */
static void
spdk_vhost_scsi_task_mgmt_cpl(struct spdk_scsi_task *scsi_task)
{
	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);

	submit_completion(task);
}
235 
236 static void
237 spdk_vhost_scsi_task_cpl(struct spdk_scsi_task *scsi_task)
238 {
239 	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);
240 
241 	/* The SCSI task has completed.  Do final processing and then post
242 	   notification to the virtqueue's "used" ring.
243 	 */
244 	task->resp->status = task->scsi.status;
245 
246 	if (task->scsi.status != SPDK_SCSI_STATUS_GOOD) {
247 		memcpy(task->resp->sense, task->scsi.sense_data, task->scsi.sense_data_len);
248 		task->resp->sense_len = task->scsi.sense_data_len;
249 	}
250 	task->resp->resid = task->scsi.transfer_len - task->scsi.data_transferred;
251 
252 	submit_completion(task);
253 }
254 
/* Queue an I/O task to the SCSI layer. The response code is pre-set to OK;
 * spdk_vhost_scsi_task_cpl() fills in the real status on completion.
 */
static void
task_submit(struct spdk_vhost_scsi_task *task)
{
	task->resp->response = VIRTIO_SCSI_S_OK;
	spdk_scsi_dev_queue_task(task->scsi_dev, &task->scsi);
}
261 
/* Queue a task-management function (e.g. LUN reset) to the SCSI layer with a
 * pre-set OK response; completion goes through spdk_vhost_scsi_task_mgmt_cpl().
 */
static void
mgmt_task_submit(struct spdk_vhost_scsi_task *task, enum spdk_scsi_task_func func)
{
	task->tmf_resp->response = VIRTIO_SCSI_S_OK;
	spdk_scsi_dev_queue_mgmt_task(task->scsi_dev, &task->scsi, func);
}
268 
269 static void
270 invalid_request(struct spdk_vhost_scsi_task *task)
271 {
272 	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx, 0);
273 	spdk_vhost_scsi_task_put(task);
274 
275 	SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI, "Invalid request (status=%" PRIu8")\n",
276 		      task->resp ? task->resp->response : -1);
277 }
278 
279 static int
280 spdk_vhost_scsi_task_init_target(struct spdk_vhost_scsi_task *task, const __u8 *lun)
281 {
282 	struct spdk_scsi_dev *dev;
283 	uint16_t lun_id = (((uint16_t)lun[2] << 8) | lun[3]) & 0x3FFF;
284 
285 	SPDK_TRACEDUMP(SPDK_TRACE_VHOST_SCSI_QUEUE, "LUN", lun, 8);
286 
287 	/* First byte must be 1 and second is target */
288 	if (lun[0] != 1 || lun[1] >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS)
289 		return -1;
290 
291 	dev = task->svdev->scsi_dev[lun[1]];
292 	task->scsi_dev = dev;
293 	if (dev == NULL) {
294 		/* If dev has been hotdetached, return 0 to allow sending
295 		 * additional hotremove event via sense codes.
296 		 */
297 		return task->svdev->scsi_dev_state[lun[1]].removed ? 0 : -1;
298 	}
299 
300 	task->scsi.target_port = spdk_scsi_dev_find_port_by_id(task->scsi_dev, 0);
301 	task->scsi.lun = spdk_scsi_dev_get_lun(dev, lun_id);
302 	return 0;
303 }
304 
/* Handle one control-queue request (TMF or async-notification query/subscribe).
 * LUN resets are forwarded to the SCSI layer and complete asynchronously via
 * spdk_vhost_scsi_task_mgmt_cpl(); every other case writes a response inline
 * and completes the descriptor immediately at the bottom of this function.
 */
static void
process_ctrl_request(struct spdk_vhost_scsi_task *task)
{
	struct vring_desc *desc;
	struct virtio_scsi_ctrl_tmf_req *ctrl_req;
	struct virtio_scsi_ctrl_an_resp *an_resp;

	spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_mgmt_cpl, spdk_vhost_scsi_task_free_cb,
				 NULL);
	/* First descriptor of the chain holds the control request itself. */
	desc = spdk_vhost_vq_get_desc(task->vq, task->req_idx);
	ctrl_req = spdk_vhost_gpa_to_vva(&task->svdev->vdev, desc->addr);

	SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI_QUEUE,
		      "Processing controlq descriptor: desc %d/%p, desc_addr %p, len %d, flags %d, last_used_idx %d; kickfd %d; size %d\n",
		      task->req_idx, desc, (void *)desc->addr, desc->len, desc->flags, task->vq->last_used_idx,
		      task->vq->kickfd, task->vq->size);
	SPDK_TRACEDUMP(SPDK_TRACE_VHOST_SCSI_QUEUE, "Request desriptor", (uint8_t *)ctrl_req,
		       desc->len);

	/* Sets task->scsi_dev (possibly NULL); the NULL case is handled per
	 * request type below, so the return value is intentionally ignored.
	 */
	spdk_vhost_scsi_task_init_target(task, ctrl_req->lun);

	/* Process the TMF request */
	switch (ctrl_req->type) {
	case VIRTIO_SCSI_T_TMF:
		/* Get the response buffer */
		assert(spdk_vhost_vring_desc_has_next(desc));
		desc = spdk_vhost_vring_desc_get_next(task->vq->desc, desc);
		task->tmf_resp = spdk_vhost_gpa_to_vva(&task->svdev->vdev, desc->addr);

		/* Check if we are processing a valid request */
		if (task->scsi_dev == NULL) {
			task->tmf_resp->response = VIRTIO_SCSI_S_BAD_TARGET;
			break;
		}

		switch (ctrl_req->subtype) {
		case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
			/* Handle LUN reset */
			SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI_QUEUE, "LUN reset\n");

			/* Completes asynchronously - do not complete the
			 * descriptor here.
			 */
			mgmt_task_submit(task, SPDK_SCSI_TASK_FUNC_LUN_RESET);
			return;
		default:
			task->tmf_resp->response = VIRTIO_SCSI_S_ABORTED;
			/* Unsupported command */
			SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI_QUEUE, "Unsupported TMF command %x\n", ctrl_req->subtype);
			break;
		}
		break;
	case VIRTIO_SCSI_T_AN_QUERY:
	case VIRTIO_SCSI_T_AN_SUBSCRIBE: {
		/* Async-notification is not supported; reject the request. */
		desc = spdk_vhost_vring_desc_get_next(task->vq->desc, desc);
		an_resp = spdk_vhost_gpa_to_vva(&task->svdev->vdev, desc->addr);
		an_resp->response = VIRTIO_SCSI_S_ABORTED;
		break;
	}
	default:
		SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI_QUEUE, "Unsupported control command %x\n", ctrl_req->type);
		break;
	}

	/* Synchronous completion path: control requests carry no data payload,
	 * hence 0 used bytes.
	 */
	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx, 0);
	spdk_vhost_scsi_task_put(task);
}
369 
370 /*
371  * Process task's descriptor chain and setup data related fields.
372  * Return
373  *   -1 if request is invalid and must be aborted,
374  *    0 if all data are set.
375  */
376 static int
377 task_data_setup(struct spdk_vhost_scsi_task *task,
378 		struct virtio_scsi_cmd_req **req)
379 {
380 	struct rte_vhost_vring *vq = task->vq;
381 	struct spdk_vhost_dev *vdev = &task->svdev->vdev;
382 	struct vring_desc *desc =  spdk_vhost_vq_get_desc(task->vq, task->req_idx);
383 	struct iovec *iovs = task->iovs;
384 	uint16_t iovcnt = 0, iovcnt_max = SPDK_VHOST_IOVS_MAX;
385 	uint32_t len = 0;
386 
387 	/* Sanity check. First descriptor must be readable and must have next one. */
388 	if (spdk_unlikely(spdk_vhost_vring_desc_is_wr(desc) || !spdk_vhost_vring_desc_has_next(desc))) {
389 		SPDK_WARNLOG("Invalid first (request) descriptor.\n");
390 		task->resp = NULL;
391 		goto abort_task;
392 	}
393 
394 	spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_cpl, spdk_vhost_scsi_task_free_cb, NULL);
395 	*req = spdk_vhost_gpa_to_vva(vdev, desc->addr);
396 
397 	desc = spdk_vhost_vring_desc_get_next(vq->desc, desc);
398 	task->scsi.dxfer_dir = spdk_vhost_vring_desc_is_wr(desc) ? SPDK_SCSI_DIR_FROM_DEV :
399 			       SPDK_SCSI_DIR_TO_DEV;
400 	task->scsi.iovs = iovs;
401 
402 	if (task->scsi.dxfer_dir == SPDK_SCSI_DIR_FROM_DEV) {
403 		/*
404 		 * FROM_DEV (READ): [RD_req][WR_resp][WR_buf0]...[WR_bufN]
405 		 */
406 		task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
407 		if (!spdk_vhost_vring_desc_has_next(desc)) {
408 			/*
409 			 * TEST UNIT READY command and some others might not contain any payload and this is not an error.
410 			 */
411 			SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI_DATA,
412 				      "No payload descriptors for FROM DEV command req_idx=%"PRIu16".\n", task->req_idx);
413 			SPDK_TRACEDUMP(SPDK_TRACE_VHOST_SCSI_DATA, "CDB=", (*req)->cdb, VIRTIO_SCSI_CDB_SIZE);
414 			task->scsi.iovcnt = 1;
415 			task->scsi.iovs[0].iov_len = 0;
416 			task->scsi.length = 0;
417 			task->scsi.transfer_len = 0;
418 			return 0;
419 		}
420 
421 		desc = spdk_vhost_vring_desc_get_next(vq->desc, desc);
422 
423 		/* All remaining descriptors are data. */
424 		while (iovcnt < iovcnt_max) {
425 			if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
426 				task->resp = NULL;
427 				goto abort_task;
428 			}
429 			len += desc->len;
430 
431 			if (!spdk_vhost_vring_desc_has_next(desc))
432 				break;
433 
434 			desc = spdk_vhost_vring_desc_get_next(vq->desc, desc);
435 			if (spdk_unlikely(!spdk_vhost_vring_desc_is_wr(desc))) {
436 				SPDK_WARNLOG("FROM DEV cmd: descriptor nr %" PRIu16" in payload chain is read only.\n", iovcnt);
437 				task->resp = NULL;
438 				goto abort_task;
439 			}
440 		}
441 	} else {
442 		SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI_DATA, "TO DEV");
443 		/*
444 		 * TO_DEV (WRITE):[RD_req][RD_buf0]...[RD_bufN][WR_resp]
445 		 * No need to check descriptor WR flag as this is done while setting scsi.dxfer_dir.
446 		 */
447 
448 		/* Process descriptors up to response. */
449 		while (!spdk_vhost_vring_desc_is_wr(desc) && iovcnt < iovcnt_max) {
450 			if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
451 				task->resp = NULL;
452 				goto abort_task;
453 			}
454 			len += desc->len;
455 
456 			if (!spdk_vhost_vring_desc_has_next(desc)) {
457 				SPDK_WARNLOG("TO_DEV cmd: no response descriptor.\n");
458 				task->resp = NULL;
459 				goto abort_task;
460 			}
461 
462 			desc = spdk_vhost_vring_desc_get_next(vq->desc, desc);
463 		}
464 
465 		task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
466 		if (spdk_vhost_vring_desc_has_next(desc)) {
467 			SPDK_WARNLOG("TO_DEV cmd: ignoring unexpected descriptors after response descriptor.\n");
468 		}
469 	}
470 
471 	if (iovcnt == iovcnt_max) {
472 		SPDK_WARNLOG("Too many IO vectors in chain!\n");
473 		goto abort_task;
474 	}
475 
476 	task->scsi.iovcnt = iovcnt;
477 	task->scsi.length = len;
478 	task->scsi.transfer_len = len;
479 	return 0;
480 
481 abort_task:
482 	if (task->resp) {
483 		task->resp->response = VIRTIO_SCSI_S_ABORTED;
484 	}
485 
486 	return -1;
487 }
488 
489 static int
490 process_request(struct spdk_vhost_scsi_task *task)
491 {
492 	struct virtio_scsi_cmd_req *req;
493 	int result;
494 
495 	result = task_data_setup(task, &req);
496 	if (result) {
497 		return result;
498 	}
499 
500 	result = spdk_vhost_scsi_task_init_target(task, req->lun);
501 	if (spdk_unlikely(result != 0)) {
502 		task->resp->response = VIRTIO_SCSI_S_BAD_TARGET;
503 		return -1;
504 	}
505 
506 	task->scsi.cdb = req->cdb;
507 	SPDK_TRACEDUMP(SPDK_TRACE_VHOST_SCSI_DATA, "request CDB", req->cdb, VIRTIO_SCSI_CDB_SIZE);
508 
509 	if (spdk_unlikely(task->scsi.lun == NULL)) {
510 		spdk_scsi_task_process_null_lun(&task->scsi);
511 		task->resp->response = VIRTIO_SCSI_S_OK;
512 		return 1;
513 	}
514 
515 	return 0;
516 }
517 
518 static void
519 process_controlq(struct spdk_vhost_scsi_dev *svdev, struct rte_vhost_vring *vq)
520 {
521 	struct spdk_vhost_scsi_task *tasks[32];
522 	struct spdk_vhost_scsi_task *task;
523 	uint16_t reqs[32];
524 	uint16_t reqs_cnt, i;
525 
526 	reqs_cnt = spdk_vhost_vq_avail_ring_get(vq, reqs, SPDK_COUNTOF(reqs));
527 	spdk_vhost_get_tasks(svdev, tasks, reqs_cnt);
528 	for (i = 0; i < reqs_cnt; i++) {
529 		task = tasks[i];
530 		memset(task, 0, sizeof(*task));
531 		task->vq = vq;
532 		task->svdev = svdev;
533 		task->req_idx = reqs[i];
534 
535 		process_ctrl_request(task);
536 	}
537 }
538 
539 static void
540 process_requestq(struct spdk_vhost_scsi_dev *svdev, struct rte_vhost_vring *vq)
541 {
542 	struct spdk_vhost_scsi_task *tasks[32];
543 	struct spdk_vhost_scsi_task *task;
544 	uint16_t reqs[32];
545 	uint16_t reqs_cnt, i;
546 	int result;
547 
548 	reqs_cnt = spdk_vhost_vq_avail_ring_get(vq, reqs, SPDK_COUNTOF(reqs));
549 	assert(reqs_cnt <= 32);
550 
551 	spdk_vhost_get_tasks(svdev, tasks, reqs_cnt);
552 
553 	for (i = 0; i < reqs_cnt; i++) {
554 		SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI, "====== Starting processing request idx %"PRIu16"======\n",
555 			      reqs[i]);
556 
557 		task = tasks[i];
558 		memset(task, 0, sizeof(*task));
559 		task->vq = vq;
560 		task->svdev = svdev;
561 		task->req_idx = reqs[i];
562 		result = process_request(task);
563 		if (likely(result == 0)) {
564 			task_submit(task);
565 			SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI, "====== Task %p req_idx %d submitted ======\n", task,
566 				      task->req_idx);
567 		} else if (result > 0) {
568 			spdk_vhost_scsi_task_cpl(&task->scsi);
569 			SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI, "====== Task %p req_idx %d finished early ======\n", task,
570 				      task->req_idx);
571 		} else {
572 			invalid_request(task);
573 			SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI, "====== Task %p req_idx %d failed ======\n", task,
574 				      task->req_idx);
575 		}
576 	}
577 }
578 
/* Management poller body (runs every MGMT_POLL_PERIOD_US): reap hot-detached
 * targets, then service the control queue.
 */
static void
vdev_mgmt_worker(void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;

	process_removed_devs(svdev);
	process_controlq(svdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_CONTROLQ]);
}
587 
588 static void
589 vdev_worker(void *arg)
590 {
591 	struct spdk_vhost_scsi_dev *svdev = arg;
592 	uint32_t q_idx;
593 
594 	for (q_idx = VIRTIO_SCSI_REQUESTQ; q_idx < svdev->vdev.num_queues; q_idx++) {
595 		process_requestq(svdev, &svdev->vdev.virtqueue[q_idx]);
596 	}
597 }
598 
599 static struct spdk_vhost_scsi_dev *
600 to_scsi_dev(struct spdk_vhost_dev *ctrlr)
601 {
602 	if (ctrlr == NULL) {
603 		return NULL;
604 	}
605 
606 	if (ctrlr->type != SPDK_VHOST_DEV_T_SCSI) {
607 		SPDK_ERRLOG("Controller %s: expected SCSI controller (%d) but got %d\n",
608 			    ctrlr->name, SPDK_VHOST_DEV_T_SCSI, ctrlr->type);
609 		return NULL;
610 	}
611 
612 	return (struct spdk_vhost_scsi_dev *)ctrlr;
613 }
614 
615 int
616 spdk_vhost_scsi_dev_construct(const char *name, const char *cpumask)
617 {
618 	struct spdk_vhost_scsi_dev *svdev = spdk_dma_zmalloc(sizeof(struct spdk_vhost_scsi_dev),
619 					    SPDK_CACHE_LINE_SIZE, NULL);
620 	int rc;
621 
622 	if (svdev == NULL) {
623 		return -ENOMEM;
624 	}
625 
626 	spdk_vhost_lock();
627 	rc = spdk_vhost_dev_construct(&svdev->vdev, name, cpumask, SPDK_VHOST_DEV_T_SCSI,
628 				      &spdk_vhost_scsi_device_backend);
629 
630 	if (rc) {
631 		spdk_dma_free(svdev);
632 	}
633 
634 	spdk_vhost_unlock();
635 	return rc;
636 }
637 
638 int
639 spdk_vhost_scsi_dev_remove(struct spdk_vhost_dev *vdev)
640 {
641 	struct spdk_vhost_scsi_dev *svdev = to_scsi_dev(vdev);
642 	int rc, i;
643 
644 	if (svdev == NULL) {
645 		return -EINVAL;
646 	}
647 
648 	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; ++i) {
649 		if (svdev->scsi_dev[i]) {
650 			SPDK_ERRLOG("Trying to remove non-empty controller: %s.\n", vdev->name);
651 			return -EBUSY;
652 		}
653 	}
654 
655 	rc = spdk_vhost_dev_remove(vdev);
656 	if (rc != 0) {
657 		return rc;
658 	}
659 
660 	spdk_dma_free(svdev);
661 	return 0;
662 }
663 
664 struct spdk_scsi_dev *
665 spdk_vhost_scsi_dev_get_dev(struct spdk_vhost_dev *vdev, uint8_t num)
666 {
667 	struct spdk_vhost_scsi_dev *svdev;
668 
669 	assert(num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
670 	svdev = to_scsi_dev(vdev);
671 
672 	return svdev ? svdev->scsi_dev[num] : NULL;
673 }
674 
675 static void
676 spdk_vhost_scsi_lun_hotremove(const struct spdk_scsi_lun *lun, void *arg)
677 {
678 	struct spdk_vhost_scsi_dev *svdev = arg;
679 	const struct spdk_scsi_dev *scsi_dev;
680 	unsigned scsi_dev_num;
681 
682 	assert(lun != NULL);
683 	assert(svdev != NULL);
684 	if (!spdk_vhost_dev_has_feature(&svdev->vdev, VIRTIO_SCSI_F_HOTPLUG)) {
685 		SPDK_WARNLOG("%s: hotremove is not enabled for this controller.\n", svdev->vdev.name);
686 		return;
687 	}
688 
689 	scsi_dev = spdk_scsi_lun_get_dev(lun);
690 	for (scsi_dev_num = 0; scsi_dev_num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; scsi_dev_num++) {
691 		if (svdev->scsi_dev[scsi_dev_num] == scsi_dev) {
692 			break;
693 		}
694 	}
695 
696 	if (scsi_dev_num == SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
697 		SPDK_ERRLOG("Dev %s is not a part of vhost scsi controller '%s'.\n",
698 			    spdk_scsi_dev_get_name(scsi_dev),
699 			    svdev->vdev.name);
700 		return;
701 	}
702 
703 	eventq_enqueue(svdev, scsi_dev_num, VIRTIO_SCSI_T_TRANSPORT_RESET,
704 		       VIRTIO_SCSI_EVT_RESET_REMOVED);
705 }
706 
707 int
708 spdk_vhost_scsi_dev_add_dev(struct spdk_vhost_dev *vdev, unsigned scsi_dev_num,
709 			    const char *lun_name)
710 {
711 	struct spdk_vhost_scsi_dev *svdev;
712 	char dev_name[SPDK_SCSI_DEV_MAX_NAME];
713 	int lun_id_list[1];
714 	char *lun_names_list[1];
715 
716 	svdev = to_scsi_dev(vdev);
717 	if (svdev == NULL) {
718 		return -EINVAL;
719 	}
720 
721 	if (scsi_dev_num >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
722 		SPDK_ERRLOG("Controller %d device number too big (max %d)\n", scsi_dev_num,
723 			    SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
724 		return -EINVAL;
725 	}
726 
727 	if (lun_name == NULL) {
728 		SPDK_ERRLOG("No lun name specified \n");
729 		return -EINVAL;
730 	} else if (strlen(lun_name) >= SPDK_SCSI_DEV_MAX_NAME) {
731 		SPDK_ERRLOG("LUN name '%s' too long (max %d).\n", lun_name, SPDK_SCSI_DEV_MAX_NAME - 1);
732 		return -1;
733 	}
734 
735 	svdev = to_scsi_dev(vdev);
736 	if (svdev == NULL) {
737 		return -EINVAL;
738 	}
739 
740 	if (vdev->lcore != -1 && !spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
741 		SPDK_ERRLOG("Controller %s is in use and hotplug is not supported\n", vdev->name);
742 		return -ENOTSUP;
743 	}
744 
745 	if (svdev->scsi_dev[scsi_dev_num] != NULL) {
746 		SPDK_ERRLOG("Controller %s dev %u already occupied\n", vdev->name, scsi_dev_num);
747 		return -EEXIST;
748 	}
749 
750 	/*
751 	 * At this stage only one LUN per device
752 	 */
753 	snprintf(dev_name, sizeof(dev_name), "Dev %u", scsi_dev_num);
754 	lun_id_list[0] = 0;
755 	lun_names_list[0] = (char *)lun_name;
756 
757 	svdev->scsi_dev_state[scsi_dev_num].removed = false;
758 	svdev->scsi_dev[scsi_dev_num] = spdk_scsi_dev_construct(dev_name, lun_names_list, lun_id_list, 1,
759 					SPDK_SPC_PROTOCOL_IDENTIFIER_SAS, spdk_vhost_scsi_lun_hotremove, svdev);
760 
761 	if (svdev->scsi_dev[scsi_dev_num] == NULL) {
762 		SPDK_ERRLOG("Couldn't create spdk SCSI device '%s' using lun device '%s' in controller: %s\n",
763 			    dev_name, lun_name, vdev->name);
764 		return -EINVAL;
765 	}
766 	spdk_scsi_dev_add_port(svdev->scsi_dev[scsi_dev_num], 0, "vhost");
767 
768 	if (vdev->lcore != -1) {
769 		spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[scsi_dev_num]);
770 		eventq_enqueue(svdev, scsi_dev_num, VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_RESCAN);
771 	}
772 
773 	SPDK_NOTICELOG("Controller %s: defined device '%s' using lun '%s'\n",
774 		       vdev->name, dev_name, lun_name);
775 	return 0;
776 }
777 
778 int
779 spdk_vhost_scsi_dev_remove_dev(struct spdk_vhost_dev *vdev, unsigned scsi_dev_num,
780 			       spdk_vhost_event_fn cb_fn, void *cb_arg)
781 {
782 	struct spdk_vhost_scsi_dev *svdev;
783 	struct spdk_scsi_dev *scsi_dev;
784 	struct spdk_scsi_dev_vhost_state *scsi_dev_state;
785 	int rc = 0;
786 
787 	if (scsi_dev_num >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
788 		SPDK_ERRLOG("%s: invalid device number %d\n", vdev->name, scsi_dev_num);
789 		return -EINVAL;
790 	}
791 
792 	svdev = to_scsi_dev(vdev);
793 	if (svdev == NULL) {
794 		return -ENODEV;
795 	}
796 
797 	scsi_dev = svdev->scsi_dev[scsi_dev_num];
798 	if (scsi_dev == NULL) {
799 		SPDK_ERRLOG("Controller %s dev %u is not occupied\n", vdev->name, scsi_dev_num);
800 		return -ENODEV;
801 	}
802 
803 	if (svdev->vdev.lcore == -1) {
804 		/* controller is not in use, remove dev and exit */
805 		spdk_scsi_dev_destruct(scsi_dev);
806 		svdev->scsi_dev[scsi_dev_num] = NULL;
807 		if (cb_fn) {
808 			rc = cb_fn(vdev, cb_arg);
809 		}
810 		SPDK_NOTICELOG("%s: removed device 'Dev %u'\n", vdev->name, scsi_dev_num);
811 		return rc;
812 	}
813 
814 	if (!spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
815 		SPDK_WARNLOG("%s: 'Dev %u' is in use and hot-detach is not enabled for this controller.\n",
816 			     svdev->vdev.name, scsi_dev_num);
817 		return -ENOTSUP;
818 	}
819 
820 	scsi_dev_state = &svdev->scsi_dev_state[scsi_dev_num];
821 	if (scsi_dev_state->removed) {
822 		SPDK_WARNLOG("%s: 'Dev %u' has been already marked to hotremove.\n", svdev->vdev.name,
823 			     scsi_dev_num);
824 		return -EBUSY;
825 	}
826 
827 	scsi_dev_state->remove_cb = cb_fn;
828 	scsi_dev_state->remove_ctx = cb_arg;
829 	scsi_dev_state->removed = true;
830 	eventq_enqueue(svdev, scsi_dev_num, VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_REMOVED);
831 
832 	SPDK_NOTICELOG("%s: queued 'Dev %u' for hot-detach.\n", vdev->name, scsi_dev_num);
833 	return 0;
834 }
835 
/* Build vhost-scsi controllers from [VhostScsiN] sections of the global SPDK
 * config file: one controller per section, plus one target per "Dev" entry
 * (each with exactly one LUN). Returns 0 on success, -1 on any config error.
 *
 * Fix: the "Dev" number was parsed with strtol() but never validated, so a
 * non-numeric value silently became 0; it is now rejected via endptr.
 */
int
spdk_vhost_scsi_controller_construct(void)
{
	struct spdk_conf_section *sp = spdk_conf_first_section(NULL);
	struct spdk_vhost_dev *vdev;
	int i, dev_num;
	unsigned ctrlr_num = 0;
	char *lun_name, *dev_num_str;
	char *cpumask;
	char *name;
	char *end;

	while (sp != NULL) {
		if (!spdk_conf_section_match_prefix(sp, "VhostScsi")) {
			sp = spdk_conf_next_section(sp);
			continue;
		}

		if (sscanf(spdk_conf_section_get_name(sp), "VhostScsi%u", &ctrlr_num) != 1) {
			SPDK_ERRLOG("Section '%s' has non-numeric suffix.\n",
				    spdk_conf_section_get_name(sp));
			return -1;
		}

		name =  spdk_conf_section_get_val(sp, "Name");
		cpumask = spdk_conf_section_get_val(sp, "Cpumask");

		if (spdk_vhost_scsi_dev_construct(name, cpumask) < 0) {
			return -1;
		}

		vdev = spdk_vhost_dev_find(name);
		assert(vdev);

		/* Each "Dev <num> <lun_name>" line attaches one target. */
		for (i = 0; spdk_conf_section_get_nval(sp, "Dev", i) != NULL; i++) {
			dev_num_str = spdk_conf_section_get_nmval(sp, "Dev", i, 0);
			if (dev_num_str == NULL) {
				SPDK_ERRLOG("%s: Invalid or missing Dev number\n", name);
				return -1;
			}

			end = NULL;
			dev_num = (int)strtol(dev_num_str, &end, 10);
			if (end == dev_num_str || *end != '\0') {
				/* Reject non-numeric Dev numbers instead of
				 * silently treating them as 0.
				 */
				SPDK_ERRLOG("%s: Invalid or missing Dev number\n", name);
				return -1;
			}
			lun_name = spdk_conf_section_get_nmval(sp, "Dev", i, 1);
			if (lun_name == NULL) {
				SPDK_ERRLOG("%s: Invalid or missing LUN name for dev %d\n", name, dev_num);
				return -1;
			} else if (spdk_conf_section_get_nmval(sp, "Dev", i, 2)) {
				SPDK_ERRLOG("%s: Only one LUN per vhost SCSI device supported\n", name);
				return -1;
			}

			if (spdk_vhost_scsi_dev_add_dev(vdev, dev_num, lun_name) < 0) {
				return -1;
			}
		}

		sp = spdk_conf_next_section(sp);

	}

	return 0;
}
897 
898 static void
899 free_task_pool(struct spdk_vhost_scsi_dev *svdev)
900 {
901 	struct spdk_vhost_task *task;
902 
903 	if (!svdev->task_pool) {
904 		return;
905 	}
906 
907 	while (spdk_ring_dequeue(svdev->task_pool, (void **)&task, 1) == 1) {
908 		spdk_dma_free(task);
909 	}
910 
911 	spdk_ring_free(svdev->task_pool);
912 	svdev->task_pool = NULL;
913 }
914 
915 static int
916 alloc_task_pool(struct spdk_vhost_scsi_dev *svdev)
917 {
918 	struct spdk_vhost_scsi_task *task;
919 	uint32_t task_cnt = 0;
920 	uint32_t ring_size, socket_id;
921 	uint16_t i;
922 	int rc;
923 
924 	for (i = 0; i < svdev->vdev.num_queues; i++) {
925 		/*
926 		 * FIXME:
927 		 * this is too big because we need only size/2 from each queue but for now
928 		 * lets leave it as is to be sure we are not mistaken.
929 		 *
930 		 * Limit the pool size to 1024 * num_queues. This should be enough as QEMU have the
931 		 * same hard limit for queue size.
932 		 */
933 		task_cnt += spdk_min(svdev->vdev.virtqueue[i].size, 1024);
934 	}
935 
936 	ring_size = spdk_align32pow2(task_cnt + 1);
937 	socket_id = spdk_env_get_socket_id(svdev->vdev.lcore);
938 
939 	svdev->task_pool = spdk_ring_create(SPDK_RING_TYPE_SP_SC, ring_size, socket_id);
940 	if (svdev->task_pool == NULL) {
941 		SPDK_ERRLOG("Controller %s: Failed to init vhost scsi task pool\n", svdev->vdev.name);
942 		return -1;
943 	}
944 
945 	for (i = 0; i < task_cnt; ++i) {
946 		task = spdk_dma_zmalloc_socket(sizeof(*task), SPDK_CACHE_LINE_SIZE, NULL, socket_id);
947 		if (task == NULL) {
948 			SPDK_ERRLOG("Controller %s: Failed to allocate task\n", svdev->vdev.name);
949 			free_task_pool(svdev);
950 			return -1;
951 		}
952 
953 		rc = spdk_ring_enqueue(svdev->task_pool, (void **)&task, 1);
954 		if (rc != 1) {
955 			SPDK_ERRLOG("Controller %s: Failed to enuqueue %"PRIu32" vhost scsi tasks\n", svdev->vdev.name,
956 				    task_cnt);
957 			free_task_pool(svdev);
958 			return -1;
959 		}
960 	}
961 
962 	return 0;
963 }
964 
965 /*
966  * A new device is added to a data core. First the device is added to the main linked list
967  * and then allocated to a specific data core.
968  */
969 static int
970 new_device(struct spdk_vhost_dev *vdev, void *event_ctx)
971 {
972 	struct spdk_vhost_scsi_dev *svdev;
973 	uint32_t i;
974 	int rc;
975 
976 	svdev = to_scsi_dev(vdev);
977 	if (svdev == NULL) {
978 		SPDK_ERRLOG("Trying to start non-scsi controller as a scsi one.\n");
979 		rc = -1;
980 		goto out;
981 	}
982 
983 	rc = alloc_task_pool(svdev);
984 	if (rc != 0) {
985 		SPDK_ERRLOG("%s: failed to alloc task pool.\n", vdev->name);
986 		goto out;
987 	}
988 
989 	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) {
990 		if (svdev->scsi_dev[i] == NULL) {
991 			continue;
992 		}
993 		spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[i]);
994 	}
995 	SPDK_NOTICELOG("Started poller for vhost controller %s on lcore %d\n", vdev->name, vdev->lcore);
996 
997 	spdk_vhost_dev_mem_register(vdev);
998 
999 	spdk_poller_register(&svdev->requestq_poller, vdev_worker, svdev, vdev->lcore, 0);
1000 	spdk_poller_register(&svdev->mgmt_poller, vdev_mgmt_worker, svdev, vdev->lcore,
1001 			     MGMT_POLL_PERIOD_US);
1002 out:
1003 	spdk_vhost_dev_backend_event_done(event_ctx, rc);
1004 	return rc;
1005 }
1006 
/* Context handed from destroy_device() to destroy_device_poller_cb(). */
struct spdk_vhost_dev_destroy_ctx {
	struct spdk_vhost_scsi_dev *svdev;	/* controller being stopped */
	struct spdk_poller *poller;		/* retry poller that waits for in-flight tasks */
	void *event_ctx;			/* opaque token for spdk_vhost_dev_backend_event_done() */
};
1012 
1013 static void
1014 destroy_device_poller_cb(void *arg)
1015 {
1016 	struct spdk_vhost_dev_destroy_ctx *ctx = arg;
1017 	struct spdk_vhost_scsi_dev *svdev = ctx->svdev;
1018 	uint32_t i;
1019 
1020 	if (svdev->vdev.task_cnt > 0) {
1021 		return;
1022 	}
1023 
1024 	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) {
1025 		if (svdev->scsi_dev[i] == NULL) {
1026 			continue;
1027 		}
1028 		spdk_scsi_dev_free_io_channels(svdev->scsi_dev[i]);
1029 	}
1030 
1031 	SPDK_NOTICELOG("Stopping poller for vhost controller %s\n", svdev->vdev.name);
1032 	spdk_vhost_dev_mem_unregister(&svdev->vdev);
1033 
1034 	free_task_pool(svdev);
1035 
1036 	spdk_poller_unregister(&ctx->poller, NULL);
1037 	spdk_vhost_dev_backend_event_done(ctx->event_ctx, 0);
1038 }
1039 
1040 static int
1041 destroy_device(struct spdk_vhost_dev *vdev, void *event_ctx)
1042 {
1043 	struct spdk_vhost_scsi_dev *svdev;
1044 	struct spdk_vhost_dev_destroy_ctx *destroy_ctx;
1045 
1046 	svdev = to_scsi_dev(vdev);
1047 	if (svdev == NULL) {
1048 		SPDK_ERRLOG("Trying to stop non-scsi controller as a scsi one.\n");
1049 		goto err;
1050 	}
1051 
1052 	destroy_ctx = spdk_dma_zmalloc(sizeof(*destroy_ctx), SPDK_CACHE_LINE_SIZE, NULL);
1053 	if (destroy_ctx == NULL) {
1054 		SPDK_ERRLOG("Failed to alloc memory for destroying device.\n");
1055 		goto err;
1056 	}
1057 
1058 	destroy_ctx->svdev = svdev;
1059 	destroy_ctx->event_ctx = event_ctx;
1060 
1061 	spdk_poller_unregister(&svdev->requestq_poller, NULL);
1062 	spdk_poller_unregister(&svdev->mgmt_poller, NULL);
1063 	spdk_poller_register(&destroy_ctx->poller, destroy_device_poller_cb, destroy_ctx, vdev->lcore,
1064 			     1000);
1065 
1066 	return 0;
1067 
1068 err:
1069 	spdk_vhost_dev_backend_event_done(event_ctx, -1);
1070 	return -1;
1071 }
1072 
int
spdk_vhost_init(void)
{
	/* Currently a no-op; kept to satisfy the common vhost init interface. */
	return 0;
}
1078 
int
spdk_vhost_fini(void)
{
	/* Currently a no-op; kept to satisfy the common vhost fini interface. */
	return 0;
}
1084 
1085 static void
1086 spdk_vhost_scsi_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
1087 {
1088 	struct spdk_scsi_dev *sdev;
1089 	struct spdk_scsi_lun *lun;
1090 	uint32_t dev_idx;
1091 	uint32_t lun_idx;
1092 
1093 	assert(vdev != NULL);
1094 	spdk_json_write_name(w, "scsi");
1095 	spdk_json_write_object_begin(w);
1096 	for (dev_idx = 0; dev_idx < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; dev_idx++) {
1097 		sdev = spdk_vhost_scsi_dev_get_dev(vdev, dev_idx);
1098 		if (!sdev) {
1099 			continue;
1100 		}
1101 
1102 		spdk_json_write_name(w, "scsi_dev_num");
1103 		spdk_json_write_uint32(w, dev_idx);
1104 
1105 		spdk_json_write_name(w, "id");
1106 		spdk_json_write_int32(w, spdk_scsi_dev_get_id(sdev));
1107 
1108 		spdk_json_write_name(w, "device_name");
1109 		spdk_json_write_string(w, spdk_scsi_dev_get_name(sdev));
1110 
1111 		spdk_json_write_name(w, "luns");
1112 		spdk_json_write_array_begin(w);
1113 
1114 		for (lun_idx = 0; lun_idx < SPDK_SCSI_DEV_MAX_LUN; lun_idx++) {
1115 			lun = spdk_scsi_dev_get_lun(sdev, lun_idx);
1116 			if (!lun) {
1117 				continue;
1118 			}
1119 
1120 			spdk_json_write_object_begin(w);
1121 
1122 			spdk_json_write_name(w, "id");
1123 			spdk_json_write_int32(w, spdk_scsi_lun_get_id(lun));
1124 
1125 			spdk_json_write_name(w, "name");
1126 			spdk_json_write_string(w, spdk_scsi_lun_get_name(lun));
1127 
1128 			spdk_json_write_object_end(w);
1129 		}
1130 
1131 		spdk_json_write_array_end(w);
1132 	}
1133 
1134 	spdk_json_write_object_end(w);
1135 }
1136 
/* Register the debug trace flags exposed by this module. */
SPDK_LOG_REGISTER_TRACE_FLAG("vhost_scsi", SPDK_TRACE_VHOST_SCSI)
SPDK_LOG_REGISTER_TRACE_FLAG("vhost_scsi_queue", SPDK_TRACE_VHOST_SCSI_QUEUE)
SPDK_LOG_REGISTER_TRACE_FLAG("vhost_scsi_data", SPDK_TRACE_VHOST_SCSI_DATA)
1140