xref: /spdk/lib/vhost/vhost_scsi.c (revision 450e2b88c79ca8bb0d0f142a277b35e066fbc042)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include <linux/virtio_scsi.h>
37 
38 #include "spdk/env.h"
39 #include "spdk/scsi.h"
40 #include "spdk/scsi_spec.h"
41 #include "spdk/conf.h"
42 #include "spdk/event.h"
43 #include "spdk/util.h"
44 #include "spdk/likely.h"
45 
46 #include "spdk/vhost.h"
47 #include "vhost_internal.h"
48 
49 /* Features supported by SPDK VHOST lib. */
50 #define SPDK_VHOST_SCSI_FEATURES	(SPDK_VHOST_FEATURES | \
51 					(1ULL << VIRTIO_SCSI_F_INOUT) | \
52 					(1ULL << VIRTIO_SCSI_F_HOTPLUG) | \
53 					(1ULL << VIRTIO_SCSI_F_CHANGE) | \
54 					(1ULL << VIRTIO_SCSI_F_T10_PI))
55 
56 /* Features that are specified in the VIRTIO SCSI spec but are not currently supported:
57  * - Live migration
58  * - T10 PI
59  */
60 #define SPDK_VHOST_SCSI_DISABLED_FEATURES	(SPDK_VHOST_DISABLED_FEATURES | \
61 						(1ULL << VIRTIO_SCSI_F_T10_PI))
62 
63 #define MGMT_POLL_PERIOD_US (1000 * 5)
64 
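/* Fixed virtio-scsi virtqueue layout: queue 0 is the control queue, queue 1 the
 * event queue, and queues 2 and up are request queues.
 */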
65 #define VIRTIO_SCSI_CONTROLQ   0
66 #define VIRTIO_SCSI_EVENTQ   1
67 #define VIRTIO_SCSI_REQUESTQ   2
68 
69 struct spdk_scsi_dev_vhost_state {
70 	bool removed;
71 	spdk_vhost_event_fn remove_cb;
72 	void *remove_ctx;
73 };
74 
75 struct spdk_vhost_scsi_dev {
76 	struct spdk_vhost_dev vdev;
77 	struct spdk_scsi_dev *scsi_dev[SPDK_VHOST_SCSI_CTRLR_MAX_DEVS];
78 	struct spdk_scsi_dev_vhost_state scsi_dev_state[SPDK_VHOST_SCSI_CTRLR_MAX_DEVS];
79 
80 	struct spdk_poller *requestq_poller;
81 	struct spdk_poller *mgmt_poller;
82 } __rte_cache_aligned;
83 
84 struct spdk_vhost_scsi_task {
85 	struct spdk_scsi_task	scsi;
86 	struct iovec iovs[SPDK_VHOST_IOVS_MAX];
87 
88 	union {
89 		struct virtio_scsi_cmd_resp *resp;
90 		struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
91 	};
92 
93 	struct spdk_vhost_scsi_dev *svdev;
94 	struct spdk_scsi_dev *scsi_dev;
95 
96 	int req_idx;
97 
98 	/* If set, the task is currently used for I/O processing. */
99 	bool used;
100 
101 	struct spdk_vhost_virtqueue *vq;
102 };
103 
104 static int spdk_vhost_scsi_start(struct spdk_vhost_dev *, void *);
105 static int spdk_vhost_scsi_stop(struct spdk_vhost_dev *, void *);
106 static void spdk_vhost_scsi_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
107 
108 const struct spdk_vhost_dev_backend spdk_vhost_scsi_device_backend = {
109 	.virtio_features = SPDK_VHOST_SCSI_FEATURES,
110 	.disabled_features = SPDK_VHOST_SCSI_DISABLED_FEATURES,
111 	.start_device = spdk_vhost_scsi_start,
112 	.stop_device = spdk_vhost_scsi_stop,
113 	.dump_config_json = spdk_vhost_scsi_config_json,
114 	.vhost_remove_controller = spdk_vhost_scsi_dev_remove,
115 };
116 
117 static void
118 spdk_vhost_scsi_task_put(struct spdk_vhost_scsi_task *task)
119 {
120 	spdk_scsi_task_put(&task->scsi);
121 }
122 
123 static void
124 spdk_vhost_scsi_task_free_cb(struct spdk_scsi_task *scsi_task)
125 {
126 	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);
127 
128 	assert(task->svdev->vdev.task_cnt > 0);
129 	task->svdev->vdev.task_cnt--;
130 	task->used = false;
131 }
132 
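/*
 * Deferred hot-detach: walk all SCSI devices on this controller and destruct any
 * device that was marked removed once it has no pending tasks. The optional remove
 * callback registered at hot-detach time is invoked afterwards.
 */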
133 static void
134 process_removed_devs(struct spdk_vhost_scsi_dev *svdev)
135 {
136 	struct spdk_scsi_dev *dev;
137 	struct spdk_scsi_dev_vhost_state *state;
138 	int i;
139 
140 	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; ++i) {
141 		dev = svdev->scsi_dev[i];
142 		state = &svdev->scsi_dev_state[i];
143 
144 		if (dev && state->removed && !spdk_scsi_dev_has_pending_tasks(dev)) {
145 			spdk_scsi_dev_free_io_channels(dev);
146 			spdk_scsi_dev_destruct(dev);
147 			svdev->scsi_dev[i] = NULL;
148 			if (state->remove_cb) {
149 				state->remove_cb(&svdev->vdev, state->remove_ctx);
150 				state->remove_cb = NULL;
151 			}
152 			SPDK_NOTICELOG("%s: hot-detached device 'Dev %u'.\n", svdev->vdev.name, i);
153 		}
154 	}
155 }
156 
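/*
 * Post a virtio_scsi_event (e.g. VIRTIO_SCSI_T_TRANSPORT_RESET on hotplug or
 * hot-detach) to the guest via the event queue. If no avail descriptor can be
 * obtained, or the descriptor is invalid or unmapped, an error is logged and no
 * event data is written.
 */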
157 static void
158 eventq_enqueue(struct spdk_vhost_scsi_dev *svdev, unsigned scsi_dev_num, uint32_t event,
159 	       uint32_t reason)
160 {
161 	struct spdk_vhost_virtqueue *vq;
162 	struct vring_desc *desc, *desc_table;
163 	struct virtio_scsi_event *desc_ev;
164 	uint32_t desc_table_size, req_size = 0;
165 	uint16_t req;
166 	int rc;
167 
168 	assert(scsi_dev_num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
169 	vq = &svdev->vdev.virtqueue[VIRTIO_SCSI_EVENTQ];
170 
171 	if (spdk_vhost_vq_avail_ring_get(vq, &req, 1) != 1) {
172 		SPDK_ERRLOG("Controller %s: Failed to send virtio event (no avail ring entries?).\n",
173 			    svdev->vdev.name);
174 		return;
175 	}
176 
177 	rc = spdk_vhost_vq_get_desc(&svdev->vdev, vq, req, &desc, &desc_table, &desc_table_size);
178 	if (rc != 0 || desc->len < sizeof(*desc_ev)) {
179 		SPDK_ERRLOG("Controller %s: Invalid eventq descriptor at index %"PRIu16".\n",
180 			    svdev->vdev.name, req);
181 		goto out;
182 	}
183 
184 	desc_ev = spdk_vhost_gpa_to_vva(&svdev->vdev, desc->addr);
185 	if (desc_ev == NULL) {
186 		SPDK_ERRLOG("Controller %s: Eventq descriptor at index %"PRIu16" points to unmapped guest memory address %p.\n",
187 			    svdev->vdev.name, req, (void *)(uintptr_t)desc->addr);
188 		goto out;
189 	}
190 
191 	desc_ev->event = event;
192 	desc_ev->lun[0] = 1;
193 	desc_ev->lun[1] = scsi_dev_num;
194 	/* virtio LUN id 0 can refer either to the entire device
195 	 * or actual LUN 0 (the only one supported by vhost for now)
196 	 */
197 	desc_ev->lun[2] = 0 >> 8;
198 	desc_ev->lun[3] = 0 & 0xFF;
199 	/* virtio doesn't specify any strict format for LUN id (bytes 2 and 3);
200 	 * the current implementation follows the Linux kernel's encoding
201 	 */
202 	memset(&desc_ev->lun[4], 0, 4);
203 	desc_ev->reason = reason;
204 	req_size = sizeof(*desc_ev);
205 
206 out:
207 	spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, req, req_size);
208 }
209 
210 static void
211 submit_completion(struct spdk_vhost_scsi_task *task)
212 {
213 	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx,
214 					task->scsi.data_transferred);
215 	SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI, "Finished task (%p) req_idx=%d\n", task, task->req_idx);
216 
217 	spdk_vhost_scsi_task_put(task);
218 }
219 
220 static void
221 spdk_vhost_scsi_task_mgmt_cpl(struct spdk_scsi_task *scsi_task)
222 {
223 	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);
224 
225 	submit_completion(task);
226 }
227 
228 static void
229 spdk_vhost_scsi_task_cpl(struct spdk_scsi_task *scsi_task)
230 {
231 	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);
232 
233 	/* The SCSI task has completed.  Do final processing and then post
234 	   notification to the virtqueue's "used" ring.
235 	 */
236 	task->resp->status = task->scsi.status;
237 
238 	if (task->scsi.status != SPDK_SCSI_STATUS_GOOD) {
239 		memcpy(task->resp->sense, task->scsi.sense_data, task->scsi.sense_data_len);
240 		task->resp->sense_len = task->scsi.sense_data_len;
241 	}
242 	task->resp->resid = task->scsi.transfer_len - task->scsi.data_transferred;
243 
244 	submit_completion(task);
245 }
246 
247 static void
248 task_submit(struct spdk_vhost_scsi_task *task)
249 {
250 	task->resp->response = VIRTIO_SCSI_S_OK;
251 	spdk_scsi_dev_queue_task(task->scsi_dev, &task->scsi);
252 }
253 
254 static void
255 mgmt_task_submit(struct spdk_vhost_scsi_task *task, enum spdk_scsi_task_func func)
256 {
257 	task->tmf_resp->response = VIRTIO_SCSI_S_OK;
258 	spdk_scsi_dev_queue_mgmt_task(task->scsi_dev, &task->scsi, func);
259 }
260 
261 static void
262 invalid_request(struct spdk_vhost_scsi_task *task)
263 {
264 	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx, 0);
265 	spdk_vhost_scsi_task_put(task);
266 
267 	SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI, "Invalid request (status=%" PRIu8")\n",
268 		      task->resp ? task->resp->response : -1);
269 }
270 
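/*
 * Translate the 8-byte virtio-scsi LUN field into an SPDK target/LUN: byte 0 must
 * be 1, byte 1 selects the SCSI device (target) on this controller, and bytes 2-3
 * carry the 14-bit LUN id. Returns 0 on success (including a hot-removed target,
 * so a sense-based hotremove notification can still be sent) and -1 otherwise.
 */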
271 static int
272 spdk_vhost_scsi_task_init_target(struct spdk_vhost_scsi_task *task, const __u8 *lun)
273 {
274 	struct spdk_scsi_dev *dev;
275 	uint16_t lun_id = (((uint16_t)lun[2] << 8) | lun[3]) & 0x3FFF;
276 
277 	SPDK_TRACEDUMP(SPDK_TRACE_VHOST_SCSI_QUEUE, "LUN", lun, 8);
278 
279 	/* First byte must be 1 and second is target */
280 	if (lun[0] != 1 || lun[1] >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS)
281 		return -1;
282 
283 	dev = task->svdev->scsi_dev[lun[1]];
284 	task->scsi_dev = dev;
285 	if (dev == NULL || task->svdev->scsi_dev_state[lun[1]].removed) {
286 		/* If dev has been hotdetached, return 0 to allow sending
287 		 * additional hotremove event via sense codes.
288 		 */
289 		return task->svdev->scsi_dev_state[lun[1]].removed ? 0 : -1;
290 	}
291 
292 	task->scsi.target_port = spdk_scsi_dev_find_port_by_id(task->scsi_dev, 0);
293 	task->scsi.lun = spdk_scsi_dev_get_lun(dev, lun_id);
294 	return 0;
295 }
296 
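/*
 * Handle a single controlq request. Only the LUN RESET task management function is
 * actually queued to the SCSI layer; other TMFs and async-notification requests are
 * completed immediately with an appropriate response code.
 */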
297 static void
298 process_ctrl_request(struct spdk_vhost_scsi_task *task)
299 {
300 	struct spdk_vhost_dev *vdev = &task->svdev->vdev;
301 	struct vring_desc *desc, *desc_table;
302 	struct virtio_scsi_ctrl_tmf_req *ctrl_req;
303 	struct virtio_scsi_ctrl_an_resp *an_resp;
304 	uint32_t desc_table_size;
305 	int rc;
306 
307 	spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_mgmt_cpl, spdk_vhost_scsi_task_free_cb,
308 				 NULL);
309 	rc = spdk_vhost_vq_get_desc(vdev, task->vq, task->req_idx, &desc, &desc_table, &desc_table_size);
310 	if (spdk_unlikely(rc != 0)) {
311 		SPDK_ERRLOG("%s: Invalid controlq descriptor at index %d.\n",
312 			    vdev->name, task->req_idx);
313 		goto out;
314 	}
315 
316 	ctrl_req = spdk_vhost_gpa_to_vva(vdev, desc->addr);
317 
318 	SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI_QUEUE,
319 		      "Processing controlq descriptor: desc %d/%p, desc_addr %p, len %d, flags %d, last_used_idx %d; kickfd %d; size %d\n",
320 		      task->req_idx, desc, (void *)desc->addr, desc->len, desc->flags, task->vq->vring.last_used_idx,
321 		      task->vq->vring.kickfd, task->vq->vring.size);
322 	SPDK_TRACEDUMP(SPDK_TRACE_VHOST_SCSI_QUEUE, "Request descriptor", (uint8_t *)ctrl_req,
323 		       desc->len);
324 
325 	spdk_vhost_scsi_task_init_target(task, ctrl_req->lun);
326 
327 	spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_size);
328 	if (spdk_unlikely(desc == NULL)) {
329 		SPDK_ERRLOG("%s: No response descriptor for controlq request %d.\n",
330 			    vdev->name, task->req_idx);
331 		goto out;
332 	}
333 
334 	/* Process the TMF request */
335 	switch (ctrl_req->type) {
336 	case VIRTIO_SCSI_T_TMF:
337 		task->tmf_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
338 
339 		/* Check if we are processing a valid request */
340 		if (task->scsi_dev == NULL) {
341 			task->tmf_resp->response = VIRTIO_SCSI_S_BAD_TARGET;
342 			break;
343 		}
344 
345 		switch (ctrl_req->subtype) {
346 		case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
347 			/* Handle LUN reset */
348 			SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI_QUEUE, "LUN reset\n");
349 
350 			mgmt_task_submit(task, SPDK_SCSI_TASK_FUNC_LUN_RESET);
351 			return;
352 		default:
353 			task->tmf_resp->response = VIRTIO_SCSI_S_ABORTED;
354 			/* Unsupported command */
355 			SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI_QUEUE, "Unsupported TMF command %x\n", ctrl_req->subtype);
356 			break;
357 		}
358 		break;
359 	case VIRTIO_SCSI_T_AN_QUERY:
360 	case VIRTIO_SCSI_T_AN_SUBSCRIBE: {
361 		an_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
362 		an_resp->response = VIRTIO_SCSI_S_ABORTED;
363 		break;
364 	}
365 	default:
366 		SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI_QUEUE, "Unsupported control command %x\n", ctrl_req->type);
367 		break;
368 	}
369 
370 out:
371 	spdk_vhost_vq_used_ring_enqueue(vdev, task->vq, task->req_idx, 0);
372 	spdk_vhost_scsi_task_put(task);
373 }
374 
375 /*
376  * Process task's descriptor chain and setup data related fields.
377  * Return
378  *   -1 if request is invalid and must be aborted,
379  *    0 if all data are set.
380  */
381 static int
382 task_data_setup(struct spdk_vhost_scsi_task *task,
383 		struct virtio_scsi_cmd_req **req)
384 {
385 	struct spdk_vhost_dev *vdev = &task->svdev->vdev;
386 	struct vring_desc *desc, *desc_table;
387 	struct iovec *iovs = task->iovs;
388 	uint16_t iovcnt = 0, iovcnt_max = SPDK_VHOST_IOVS_MAX;
389 	uint32_t desc_table_len, len = 0;
390 	int rc;
391 
392 	spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_cpl, spdk_vhost_scsi_task_free_cb, NULL);
393 
394 	rc = spdk_vhost_vq_get_desc(vdev, task->vq, task->req_idx, &desc, &desc_table, &desc_table_len);
395 	/* First descriptor must be readable */
396 	if (rc != 0 || spdk_unlikely(spdk_vhost_vring_desc_is_wr(desc))) {
397 		SPDK_WARNLOG("%s: invalid first (request) descriptor at index %"PRIu16".\n",
398 			     vdev->name, task->req_idx);
399 		goto invalid_task;
400 	}
401 
402 	*req = spdk_vhost_gpa_to_vva(vdev, desc->addr);
403 
404 	/* Each request must have at least 2 descriptors (i.e. request and response) */
405 	spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
406 	if (desc == NULL) {
407 		SPDK_WARNLOG("%s: Descriptor chain at index %d contains neither payload nor response buffer.\n",
408 			     vdev->name, task->req_idx);
409 		goto invalid_task;
410 	}
411 	task->scsi.dxfer_dir = spdk_vhost_vring_desc_is_wr(desc) ? SPDK_SCSI_DIR_FROM_DEV :
412 			       SPDK_SCSI_DIR_TO_DEV;
413 	task->scsi.iovs = iovs;
414 
415 	if (task->scsi.dxfer_dir == SPDK_SCSI_DIR_FROM_DEV) {
416 		/*
417 		 * FROM_DEV (READ): [RD_req][WR_resp][WR_buf0]...[WR_bufN]
418 		 */
419 		task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
420 
421 		rc = spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
422 		if (spdk_unlikely(rc != 0)) {
423 			SPDK_WARNLOG("%s: invalid descriptor chain at request index %d (descriptor id overflow?).\n",
424 				     vdev->name, task->req_idx);
425 			goto invalid_task;
426 		}
427 
428 		if (desc == NULL) {
429 			/*
430 			 * TEST UNIT READY command and some others might not contain any payload and this is not an error.
431 			 */
432 			SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI_DATA,
433 				      "No payload descriptors for FROM DEV command req_idx=%"PRIu16".\n", task->req_idx);
434 			SPDK_TRACEDUMP(SPDK_TRACE_VHOST_SCSI_DATA, "CDB=", (*req)->cdb, VIRTIO_SCSI_CDB_SIZE);
435 			task->scsi.iovcnt = 1;
436 			task->scsi.iovs[0].iov_len = 0;
437 			task->scsi.length = 0;
438 			task->scsi.transfer_len = 0;
439 			return 0;
440 		}
441 
442 		/* All remaining descriptors are data. */
443 		while (desc && iovcnt < iovcnt_max) {
444 			if (spdk_unlikely(!spdk_vhost_vring_desc_is_wr(desc))) {
445 				SPDK_WARNLOG("FROM DEV cmd: descriptor nr %" PRIu16" in payload chain is read only.\n", iovcnt);
446 				goto invalid_task;
447 			}
448 
449 			if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
450 				goto invalid_task;
451 			}
452 			len += desc->len;
453 
454 			rc = spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
455 			if (spdk_unlikely(rc != 0)) {
456 				SPDK_WARNLOG("%s: invalid payload in descriptor chain starting at index %d.\n",
457 					     vdev->name, task->req_idx);
458 				goto invalid_task;
459 			}
460 		}
461 	} else {
462 		SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI_DATA, "TO DEV\n");
463 		/*
464 		 * TO_DEV (WRITE):[RD_req][RD_buf0]...[RD_bufN][WR_resp]
465 		 * No need to check descriptor WR flag as this is done while setting scsi.dxfer_dir.
466 		 */
467 
468 		/* Process descriptors up to response. */
469 		while (!spdk_vhost_vring_desc_is_wr(desc) && iovcnt < iovcnt_max) {
470 			if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
471 				goto invalid_task;
472 			}
473 			len += desc->len;
474 
475 			spdk_vhost_vring_desc_get_next(&desc, desc_table, desc_table_len);
476 			if (spdk_unlikely(desc == NULL)) {
477 				SPDK_WARNLOG("TO_DEV cmd: no response descriptor.\n");
478 				goto invalid_task;
479 			}
480 		}
481 
482 		task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
483 	}
484 
485 	if (iovcnt == iovcnt_max) {
486 		SPDK_WARNLOG("Too many IO vectors in chain!\n");
487 		task->resp->response = VIRTIO_SCSI_S_ABORTED;
488 		goto invalid_task;
489 	}
490 
491 	task->scsi.iovcnt = iovcnt;
492 	task->scsi.length = len;
493 	task->scsi.transfer_len = len;
494 	return 0;
495 
496 invalid_task:
497 	SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI_DATA, "%s: Invalid task at index %"PRIu16".\n",
498 		      vdev->name, task->req_idx);
499 	return -1;
500 }
501 
502 static int
503 process_request(struct spdk_vhost_scsi_task *task)
504 {
505 	struct virtio_scsi_cmd_req *req;
506 	int result;
507 
508 	result = task_data_setup(task, &req);
509 	if (result) {
510 		return result;
511 	}
512 
513 	result = spdk_vhost_scsi_task_init_target(task, req->lun);
514 	if (spdk_unlikely(result != 0)) {
515 		task->resp->response = VIRTIO_SCSI_S_BAD_TARGET;
516 		return -1;
517 	}
518 
519 	task->scsi.cdb = req->cdb;
520 	SPDK_TRACEDUMP(SPDK_TRACE_VHOST_SCSI_DATA, "request CDB", req->cdb, VIRTIO_SCSI_CDB_SIZE);
521 
522 	if (spdk_unlikely(task->scsi.lun == NULL)) {
523 		spdk_scsi_task_process_null_lun(&task->scsi);
524 		task->resp->response = VIRTIO_SCSI_S_OK;
525 		return 1;
526 	}
527 
528 	return 0;
529 }
530 
531 static void
532 process_controlq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue *vq)
533 {
534 	struct spdk_vhost_scsi_task *task;
535 	uint16_t reqs[32];
536 	uint16_t reqs_cnt, i;
537 
538 	reqs_cnt = spdk_vhost_vq_avail_ring_get(vq, reqs, SPDK_COUNTOF(reqs));
539 	for (i = 0; i < reqs_cnt; i++) {
540 		if (spdk_unlikely(reqs[i] >= vq->vring.size)) {
541 			SPDK_ERRLOG("%s: invalid entry in avail ring. Buffer '%"PRIu16"' exceeds virtqueue size (%"PRIu16")\n",
542 				    svdev->vdev.name, reqs[i], vq->vring.size);
543 			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
544 			continue;
545 		}
546 
547 		task = &((struct spdk_vhost_scsi_task *)vq->tasks)[reqs[i]];
548 		if (spdk_unlikely(task->used)) {
549 			SPDK_ERRLOG("%s: invalid entry in avail ring. Buffer '%"PRIu16"' is still in use!\n",
550 				    svdev->vdev.name, reqs[i]);
551 			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
552 			continue;
553 		}
554 
555 		svdev->vdev.task_cnt++;
556 		memset(&task->scsi, 0, sizeof(task->scsi));
557 		task->tmf_resp = NULL;
558 		task->used = true;
559 		process_ctrl_request(task);
560 	}
561 }
562 
563 static void
564 process_requestq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue *vq)
565 {
566 	struct spdk_vhost_scsi_task *task;
567 	uint16_t reqs[32];
568 	uint16_t reqs_cnt, i;
569 	int result;
570 
571 	reqs_cnt = spdk_vhost_vq_avail_ring_get(vq, reqs, SPDK_COUNTOF(reqs));
572 	assert(reqs_cnt <= 32);
573 
574 	for (i = 0; i < reqs_cnt; i++) {
575 		SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI, "====== Starting processing request idx %"PRIu16"======\n",
576 			      reqs[i]);
577 
578 		if (spdk_unlikely(reqs[i] >= vq->vring.size)) {
579 			SPDK_ERRLOG("%s: request idx '%"PRIu16"' exceeds virtqueue size (%"PRIu16").\n",
580 				    svdev->vdev.name, reqs[i], vq->vring.size);
581 			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
582 			continue;
583 		}
584 
585 		task = &((struct spdk_vhost_scsi_task *)vq->tasks)[reqs[i]];
586 		if (spdk_unlikely(task->used)) {
587 			SPDK_ERRLOG("%s: request with idx '%"PRIu16"' is already pending.\n",
588 				    svdev->vdev.name, reqs[i]);
589 			spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
590 			continue;
591 		}
592 
593 		svdev->vdev.task_cnt++;
594 		memset(&task->scsi, 0, sizeof(task->scsi));
595 		task->resp = NULL;
596 		task->used = true;
597 		result = process_request(task);
598 		if (likely(result == 0)) {
599 			task_submit(task);
600 			SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI, "====== Task %p req_idx %d submitted ======\n", task,
601 				      task->req_idx);
602 		} else if (result > 0) {
603 			spdk_vhost_scsi_task_cpl(&task->scsi);
604 			SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI, "====== Task %p req_idx %d finished early ======\n", task,
605 				      task->req_idx);
606 		} else {
607 			invalid_request(task);
608 			SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI, "====== Task %p req_idx %d failed ======\n", task,
609 				      task->req_idx);
610 		}
611 	}
612 }
613 
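/*
 * Pollers registered while the controller is running: vdev_mgmt_worker services the
 * controlq and finalizes hot-detached devices every MGMT_POLL_PERIOD_US, while
 * vdev_worker drains all request queues on every iteration.
 */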
614 static void
615 vdev_mgmt_worker(void *arg)
616 {
617 	struct spdk_vhost_scsi_dev *svdev = arg;
618 
619 	process_removed_devs(svdev);
620 	process_controlq(svdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_CONTROLQ]);
621 }
622 
623 static void
624 vdev_worker(void *arg)
625 {
626 	struct spdk_vhost_scsi_dev *svdev = arg;
627 	uint32_t q_idx;
628 
629 	for (q_idx = VIRTIO_SCSI_REQUESTQ; q_idx < svdev->vdev.num_queues; q_idx++) {
630 		process_requestq(svdev, &svdev->vdev.virtqueue[q_idx]);
631 	}
632 }
633 
634 static struct spdk_vhost_scsi_dev *
635 to_scsi_dev(struct spdk_vhost_dev *ctrlr)
636 {
637 	if (ctrlr == NULL) {
638 		return NULL;
639 	}
640 
641 	if (ctrlr->type != SPDK_VHOST_DEV_T_SCSI) {
642 		SPDK_ERRLOG("Controller %s: expected SCSI controller (%d) but got %d\n",
643 			    ctrlr->name, SPDK_VHOST_DEV_T_SCSI, ctrlr->type);
644 		return NULL;
645 	}
646 
647 	return (struct spdk_vhost_scsi_dev *)ctrlr;
648 }
649 
650 int
651 spdk_vhost_scsi_dev_construct(const char *name, const char *cpumask)
652 {
653 	struct spdk_vhost_scsi_dev *svdev = spdk_dma_zmalloc(sizeof(struct spdk_vhost_scsi_dev),
654 					    SPDK_CACHE_LINE_SIZE, NULL);
655 	int rc;
656 
657 	if (svdev == NULL) {
658 		return -ENOMEM;
659 	}
660 
661 	spdk_vhost_lock();
662 	rc = spdk_vhost_dev_construct(&svdev->vdev, name, cpumask, SPDK_VHOST_DEV_T_SCSI,
663 				      &spdk_vhost_scsi_device_backend);
664 
665 	if (rc) {
666 		spdk_dma_free(svdev);
667 	}
668 
669 	spdk_vhost_unlock();
670 	return rc;
671 }
672 
673 int
674 spdk_vhost_scsi_dev_remove(struct spdk_vhost_dev *vdev)
675 {
676 	struct spdk_vhost_scsi_dev *svdev = to_scsi_dev(vdev);
677 	int rc, i;
678 
679 	if (svdev == NULL) {
680 		return -EINVAL;
681 	}
682 
683 	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; ++i) {
684 		if (svdev->scsi_dev[i]) {
685 			SPDK_ERRLOG("Trying to remove non-empty controller: %s.\n", vdev->name);
686 			return -EBUSY;
687 		}
688 	}
689 
690 	rc = spdk_vhost_dev_remove(vdev);
691 	if (rc != 0) {
692 		return rc;
693 	}
694 
695 	spdk_dma_free(svdev);
696 	return 0;
697 }
698 
699 struct spdk_scsi_dev *
700 spdk_vhost_scsi_dev_get_dev(struct spdk_vhost_dev *vdev, uint8_t num)
701 {
702 	struct spdk_vhost_scsi_dev *svdev;
703 
704 	assert(num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
705 	svdev = to_scsi_dev(vdev);
706 
707 	return svdev ? svdev->scsi_dev[num] : NULL;
708 }
709 
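/*
 * Hotremove callback passed to spdk_scsi_dev_construct(). When the backing LUN goes
 * away, the whole SCSI device is hot-detached from the controller (each device
 * currently holds a single LUN).
 */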
710 static void
711 spdk_vhost_scsi_lun_hotremove(const struct spdk_scsi_lun *lun, void *arg)
712 {
713 	struct spdk_vhost_scsi_dev *svdev = arg;
714 	const struct spdk_scsi_dev *scsi_dev;
715 	unsigned scsi_dev_num;
716 
717 	assert(lun != NULL);
718 	assert(svdev != NULL);
719 	if (!spdk_vhost_dev_has_feature(&svdev->vdev, VIRTIO_SCSI_F_HOTPLUG)) {
720 		SPDK_WARNLOG("%s: hotremove is not enabled for this controller.\n", svdev->vdev.name);
721 		return;
722 	}
723 
724 	scsi_dev = spdk_scsi_lun_get_dev(lun);
725 	for (scsi_dev_num = 0; scsi_dev_num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; scsi_dev_num++) {
726 		if (svdev->scsi_dev[scsi_dev_num] == scsi_dev) {
727 			break;
728 		}
729 	}
730 
731 	if (scsi_dev_num == SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
732 		SPDK_ERRLOG("Dev %s is not a part of vhost scsi controller '%s'.\n",
733 			    spdk_scsi_dev_get_name(scsi_dev),
734 			    svdev->vdev.name);
735 		return;
736 	}
737 
738 	/* remove entire device */
739 	spdk_vhost_scsi_dev_remove_dev(&svdev->vdev, scsi_dev_num, NULL, NULL);
740 }
741 
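/*
 * Attach a new SCSI device (with a single LUN backed by 'lun_name') to slot
 * 'scsi_dev_num' of an existing controller. Illustrative usage, assuming a
 * controller named "vhost.0" and a bdev named "Malloc0" already exist:
 *
 *   struct spdk_vhost_dev *vdev = spdk_vhost_dev_find("vhost.0");
 *   spdk_vhost_scsi_dev_add_dev(vdev, 0, "Malloc0");
 *
 * If the controller is already in use by a guest, VIRTIO_SCSI_F_HOTPLUG must have
 * been negotiated; a hotplug event is then sent on the eventq.
 */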
742 int
743 spdk_vhost_scsi_dev_add_dev(struct spdk_vhost_dev *vdev, unsigned scsi_dev_num,
744 			    const char *lun_name)
745 {
746 	struct spdk_vhost_scsi_dev *svdev;
747 	char dev_name[SPDK_SCSI_DEV_MAX_NAME];
748 	int lun_id_list[1];
749 	char *lun_names_list[1];
750 
751 	svdev = to_scsi_dev(vdev);
752 	if (svdev == NULL) {
753 		return -EINVAL;
754 	}
755 
756 	if (scsi_dev_num >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
757 		SPDK_ERRLOG("%s: SCSI device number %u too big (max %d)\n", vdev->name,
758 			    scsi_dev_num, SPDK_VHOST_SCSI_CTRLR_MAX_DEVS - 1);
759 		return -EINVAL;
760 	}
761 
762 	if (lun_name == NULL) {
763 		SPDK_ERRLOG("No LUN name specified.\n");
764 		return -EINVAL;
765 	} else if (strlen(lun_name) >= SPDK_SCSI_DEV_MAX_NAME) {
766 		SPDK_ERRLOG("LUN name '%s' too long (max %d).\n", lun_name, SPDK_SCSI_DEV_MAX_NAME - 1);
767 		return -EINVAL;
768 	}
769 
775 	if (vdev->lcore != -1 && !spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
776 		SPDK_ERRLOG("Controller %s is in use and hotplug is not supported\n", vdev->name);
777 		return -ENOTSUP;
778 	}
779 
780 	if (svdev->scsi_dev[scsi_dev_num] != NULL) {
781 		SPDK_ERRLOG("Controller %s dev %u already occupied\n", vdev->name, scsi_dev_num);
782 		return -EEXIST;
783 	}
784 
785 	/*
786 	 * At this stage only one LUN per device
787 	 */
788 	snprintf(dev_name, sizeof(dev_name), "Dev %u", scsi_dev_num);
789 	lun_id_list[0] = 0;
790 	lun_names_list[0] = (char *)lun_name;
791 
792 	svdev->scsi_dev_state[scsi_dev_num].removed = false;
793 	svdev->scsi_dev[scsi_dev_num] = spdk_scsi_dev_construct(dev_name, lun_names_list, lun_id_list, 1,
794 					SPDK_SPC_PROTOCOL_IDENTIFIER_SAS, spdk_vhost_scsi_lun_hotremove, svdev);
795 
796 	if (svdev->scsi_dev[scsi_dev_num] == NULL) {
797 		SPDK_ERRLOG("Couldn't create spdk SCSI device '%s' using lun device '%s' in controller: %s\n",
798 			    dev_name, lun_name, vdev->name);
799 		return -EINVAL;
800 	}
801 	spdk_scsi_dev_add_port(svdev->scsi_dev[scsi_dev_num], 0, "vhost");
802 
803 	if (vdev->lcore != -1) {
804 		spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[scsi_dev_num]);
805 		eventq_enqueue(svdev, scsi_dev_num, VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_RESCAN);
806 	}
807 
808 	SPDK_NOTICELOG("Controller %s: defined device '%s' using lun '%s'\n",
809 		       vdev->name, dev_name, lun_name);
810 	return 0;
811 }
812 
813 int
814 spdk_vhost_scsi_dev_remove_dev(struct spdk_vhost_dev *vdev, unsigned scsi_dev_num,
815 			       spdk_vhost_event_fn cb_fn, void *cb_arg)
816 {
817 	struct spdk_vhost_scsi_dev *svdev;
818 	struct spdk_scsi_dev *scsi_dev;
819 	struct spdk_scsi_dev_vhost_state *scsi_dev_state;
820 	int rc = 0;
821 
822 	if (scsi_dev_num >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
823 		SPDK_ERRLOG("%s: invalid device number %d\n", vdev->name, scsi_dev_num);
824 		return -EINVAL;
825 	}
826 
827 	svdev = to_scsi_dev(vdev);
828 	if (svdev == NULL) {
829 		return -ENODEV;
830 	}
831 
832 	scsi_dev = svdev->scsi_dev[scsi_dev_num];
833 	if (scsi_dev == NULL) {
834 		SPDK_ERRLOG("Controller %s dev %u is not occupied\n", vdev->name, scsi_dev_num);
835 		return -ENODEV;
836 	}
837 
838 	if (svdev->vdev.lcore == -1) {
839 		/* controller is not in use, remove dev and exit */
840 		spdk_scsi_dev_destruct(scsi_dev);
841 		svdev->scsi_dev[scsi_dev_num] = NULL;
842 		if (cb_fn) {
843 			rc = cb_fn(vdev, cb_arg);
844 		}
845 		SPDK_NOTICELOG("%s: removed device 'Dev %u'\n", vdev->name, scsi_dev_num);
846 		return rc;
847 	}
848 
849 	if (!spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
850 		SPDK_WARNLOG("%s: 'Dev %u' is in use and hot-detach is not enabled for this controller.\n",
851 			     svdev->vdev.name, scsi_dev_num);
852 		return -ENOTSUP;
853 	}
854 
855 	scsi_dev_state = &svdev->scsi_dev_state[scsi_dev_num];
856 	if (scsi_dev_state->removed) {
857 		SPDK_WARNLOG("%s: 'Dev %u' has already been marked for hot-remove.\n", svdev->vdev.name,
858 			     scsi_dev_num);
859 		return -EBUSY;
860 	}
861 
862 	scsi_dev_state->remove_cb = cb_fn;
863 	scsi_dev_state->remove_ctx = cb_arg;
864 	scsi_dev_state->removed = true;
865 	eventq_enqueue(svdev, scsi_dev_num, VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_REMOVED);
866 
867 	SPDK_NOTICELOG("%s: queued 'Dev %u' for hot-detach.\n", vdev->name, scsi_dev_num);
868 	return 0;
869 }
870 
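/*
 * Build controllers from the legacy config file. Illustrative section (names and
 * values are examples only):
 *
 *   [VhostScsi0]
 *     Name vhost.0
 *     Cpumask 0x1
 *     Dev 0 Malloc0
 *
 * Each "Dev" line provides a device number and the name of the bdev backing its
 * single LUN.
 */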
871 int
872 spdk_vhost_scsi_controller_construct(void)
873 {
874 	struct spdk_conf_section *sp = spdk_conf_first_section(NULL);
875 	struct spdk_vhost_dev *vdev;
876 	int i, dev_num;
877 	unsigned ctrlr_num = 0;
878 	char *lun_name, *dev_num_str;
879 	char *cpumask;
880 	char *name;
881 
882 	while (sp != NULL) {
883 		if (!spdk_conf_section_match_prefix(sp, "VhostScsi")) {
884 			sp = spdk_conf_next_section(sp);
885 			continue;
886 		}
887 
888 		if (sscanf(spdk_conf_section_get_name(sp), "VhostScsi%u", &ctrlr_num) != 1) {
889 			SPDK_ERRLOG("Section '%s' has non-numeric suffix.\n",
890 				    spdk_conf_section_get_name(sp));
891 			return -1;
892 		}
893 
894 		name = spdk_conf_section_get_val(sp, "Name");
895 		cpumask = spdk_conf_section_get_val(sp, "Cpumask");
896 
897 		if (spdk_vhost_scsi_dev_construct(name, cpumask) < 0) {
898 			return -1;
899 		}
900 
901 		vdev = spdk_vhost_dev_find(name);
902 		assert(vdev);
903 
904 		for (i = 0; spdk_conf_section_get_nval(sp, "Dev", i) != NULL; i++) {
905 			dev_num_str = spdk_conf_section_get_nmval(sp, "Dev", i, 0);
906 			if (dev_num_str == NULL) {
907 				SPDK_ERRLOG("%s: Invalid or missing Dev number\n", name);
908 				return -1;
909 			}
910 
911 			dev_num = (int)strtol(dev_num_str, NULL, 10);
912 			lun_name = spdk_conf_section_get_nmval(sp, "Dev", i, 1);
913 			if (lun_name == NULL) {
914 				SPDK_ERRLOG("%s: Invalid or missing LUN name for dev %d\n", name, dev_num);
915 				return -1;
916 			} else if (spdk_conf_section_get_nmval(sp, "Dev", i, 2)) {
917 				SPDK_ERRLOG("%s: Only one LUN per vhost SCSI device supported\n", name);
918 				return -1;
919 			}
920 
921 			if (spdk_vhost_scsi_dev_add_dev(vdev, dev_num, lun_name) < 0) {
922 				return -1;
923 			}
924 		}
925 
926 		sp = spdk_conf_next_section(sp);
927 
928 	}
929 
930 	return 0;
931 }
932 
933 static void
934 free_task_pool(struct spdk_vhost_scsi_dev *svdev)
935 {
936 	struct spdk_vhost_virtqueue *vq;
937 	uint16_t i;
938 
939 	for (i = 0; i < svdev->vdev.num_queues; i++) {
940 		vq = &svdev->vdev.virtqueue[i];
941 		if (vq->tasks == NULL) {
942 			continue;
943 		}
944 
945 		spdk_dma_free(vq->tasks);
946 		vq->tasks = NULL;
947 	}
948 }
949 
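/*
 * Pre-allocate one spdk_vhost_scsi_task per virtqueue entry so that request index N
 * always maps to tasks[N]. Rings larger than SPDK_VHOST_MAX_VQ_SIZE are rejected.
 */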
950 static int
951 alloc_task_pool(struct spdk_vhost_scsi_dev *svdev)
952 {
953 	struct spdk_vhost_virtqueue *vq;
954 	struct spdk_vhost_scsi_task *task;
955 	uint32_t task_cnt;
956 	uint16_t i;
957 	uint32_t j;
958 
959 	for (i = 0; i < svdev->vdev.num_queues; i++) {
960 		vq = &svdev->vdev.virtqueue[i];
961 		task_cnt = vq->vring.size;
962 		if (task_cnt > SPDK_VHOST_MAX_VQ_SIZE) {
963 			/* sanity check */
964 			SPDK_ERRLOG("Controller %s: virtqueue %"PRIu16" is too big (size = %"PRIu32", max = %"PRIu32").\n",
965 				    svdev->vdev.name, i, task_cnt, SPDK_VHOST_MAX_VQ_SIZE);
966 			free_task_pool(svdev);
967 			return -1;
968 		}
969 		vq->tasks = spdk_dma_zmalloc(sizeof(struct spdk_vhost_scsi_task) * task_cnt,
970 					     SPDK_CACHE_LINE_SIZE, NULL);
971 		if (vq->tasks == NULL) {
972 			SPDK_ERRLOG("Controller %s: failed to allocate %"PRIu32" tasks for virtqueue %"PRIu16"\n",
973 				    svdev->vdev.name, task_cnt, i);
974 			free_task_pool(svdev);
975 			return -1;
976 		}
977 
978 		for (j = 0; j < task_cnt; j++) {
979 			task = &((struct spdk_vhost_scsi_task *)vq->tasks)[j];
980 			task->svdev = svdev;
981 			task->vq = vq;
982 			task->req_idx = j;
983 		}
984 	}
985 
986 	return 0;
987 }
988 
989 /*
990  * Start a vhost-scsi device on its data core: allocate per-virtqueue task pools and
991  * SCSI I/O channels, register guest memory, and start the request and mgmt pollers.
992  */
993 static int
994 spdk_vhost_scsi_start(struct spdk_vhost_dev *vdev, void *event_ctx)
995 {
996 	struct spdk_vhost_scsi_dev *svdev;
997 	uint32_t i;
998 	int rc;
999 
1000 	svdev = to_scsi_dev(vdev);
1001 	if (svdev == NULL) {
1002 		SPDK_ERRLOG("Trying to start a non-SCSI controller as a SCSI one.\n");
1003 		rc = -1;
1004 		goto out;
1005 	}
1006 
1007 	rc = alloc_task_pool(svdev);
1008 	if (rc != 0) {
1009 		SPDK_ERRLOG("%s: failed to alloc task pool.\n", vdev->name);
1010 		goto out;
1011 	}
1012 
1013 	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) {
1014 		if (svdev->scsi_dev[i] == NULL) {
1015 			continue;
1016 		}
1017 		spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[i]);
1018 	}
1019 	SPDK_NOTICELOG("Started poller for vhost controller %s on lcore %d\n", vdev->name, vdev->lcore);
1020 
1021 	spdk_vhost_dev_mem_register(vdev);
1022 
1023 	spdk_poller_register(&svdev->requestq_poller, vdev_worker, svdev, vdev->lcore, 0);
1024 	spdk_poller_register(&svdev->mgmt_poller, vdev_mgmt_worker, svdev, vdev->lcore,
1025 			     MGMT_POLL_PERIOD_US);
1026 out:
1027 	spdk_vhost_dev_backend_event_done(event_ctx, rc);
1028 	return rc;
1029 }
1030 
1031 struct spdk_vhost_dev_destroy_ctx {
1032 	struct spdk_vhost_scsi_dev *svdev;
1033 	struct spdk_poller *poller;
1034 	void *event_ctx;
1035 };
1036 
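/*
 * Device stop is asynchronous: destroy_device_poller_cb is polled until all
 * outstanding tasks have completed, then it frees I/O channels, unregisters guest
 * memory, releases the task pools and signals the backend event.
 */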
1037 static void
1038 destroy_device_poller_cb(void *arg)
1039 {
1040 	struct spdk_vhost_dev_destroy_ctx *ctx = arg;
1041 	struct spdk_vhost_scsi_dev *svdev = ctx->svdev;
1042 	uint32_t i;
1043 
1044 	if (svdev->vdev.task_cnt > 0) {
1045 		return;
1046 	}
1047 
1048 	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) {
1049 		if (svdev->scsi_dev[i] == NULL) {
1050 			continue;
1051 		}
1052 		spdk_scsi_dev_free_io_channels(svdev->scsi_dev[i]);
1053 	}
1054 
1055 	SPDK_NOTICELOG("Stopping poller for vhost controller %s\n", svdev->vdev.name);
1056 	spdk_vhost_dev_mem_unregister(&svdev->vdev);
1057 
1058 	free_task_pool(svdev);
1059 
1060 	spdk_poller_unregister(&ctx->poller, NULL);
1061 	spdk_vhost_dev_backend_event_done(ctx->event_ctx, 0);
1062 }
1063 
1064 static int
1065 spdk_vhost_scsi_stop(struct spdk_vhost_dev *vdev, void *event_ctx)
1066 {
1067 	struct spdk_vhost_scsi_dev *svdev;
1068 	struct spdk_vhost_dev_destroy_ctx *destroy_ctx;
1069 
1070 	svdev = to_scsi_dev(vdev);
1071 	if (svdev == NULL) {
1072 		SPDK_ERRLOG("Trying to stop a non-SCSI controller as a SCSI one.\n");
1073 		goto err;
1074 	}
1075 
1076 	destroy_ctx = spdk_dma_zmalloc(sizeof(*destroy_ctx), SPDK_CACHE_LINE_SIZE, NULL);
1077 	if (destroy_ctx == NULL) {
1078 		SPDK_ERRLOG("Failed to alloc memory for destroying device.\n");
1079 		goto err;
1080 	}
1081 
1082 	destroy_ctx->svdev = svdev;
1083 	destroy_ctx->event_ctx = event_ctx;
1084 
1085 	spdk_poller_unregister(&svdev->requestq_poller, NULL);
1086 	spdk_poller_unregister(&svdev->mgmt_poller, NULL);
1087 	spdk_poller_register(&destroy_ctx->poller, destroy_device_poller_cb, destroy_ctx, vdev->lcore,
1088 			     1000);
1089 
1090 	return 0;
1091 
1092 err:
1093 	spdk_vhost_dev_backend_event_done(event_ctx, -1);
1094 	return -1;
1095 }
1096 
1097 int
1098 spdk_vhost_init(void)
1099 {
1100 	return 0;
1101 }
1102 
1103 void
1104 spdk_vhost_fini(void)
1105 {
1106 }
1107 
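/*
 * Dump controller configuration as JSON. A sketch of the produced layout for a
 * single device (key order follows the writes below; values are illustrative):
 *
 *   "scsi": { "scsi_dev_num": 0, "id": 1, "device_name": "Dev 0",
 *             "luns": [ { "id": 0, "name": "Malloc0" } ] }
 */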
1108 static void
1109 spdk_vhost_scsi_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
1110 {
1111 	struct spdk_scsi_dev *sdev;
1112 	struct spdk_scsi_lun *lun;
1113 	uint32_t dev_idx;
1114 	uint32_t lun_idx;
1115 
1116 	assert(vdev != NULL);
1117 	spdk_json_write_name(w, "scsi");
1118 	spdk_json_write_object_begin(w);
1119 	for (dev_idx = 0; dev_idx < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; dev_idx++) {
1120 		sdev = spdk_vhost_scsi_dev_get_dev(vdev, dev_idx);
1121 		if (!sdev) {
1122 			continue;
1123 		}
1124 
1125 		spdk_json_write_name(w, "scsi_dev_num");
1126 		spdk_json_write_uint32(w, dev_idx);
1127 
1128 		spdk_json_write_name(w, "id");
1129 		spdk_json_write_int32(w, spdk_scsi_dev_get_id(sdev));
1130 
1131 		spdk_json_write_name(w, "device_name");
1132 		spdk_json_write_string(w, spdk_scsi_dev_get_name(sdev));
1133 
1134 		spdk_json_write_name(w, "luns");
1135 		spdk_json_write_array_begin(w);
1136 
1137 		for (lun_idx = 0; lun_idx < SPDK_SCSI_DEV_MAX_LUN; lun_idx++) {
1138 			lun = spdk_scsi_dev_get_lun(sdev, lun_idx);
1139 			if (!lun) {
1140 				continue;
1141 			}
1142 
1143 			spdk_json_write_object_begin(w);
1144 
1145 			spdk_json_write_name(w, "id");
1146 			spdk_json_write_int32(w, spdk_scsi_lun_get_id(lun));
1147 
1148 			spdk_json_write_name(w, "name");
1149 			spdk_json_write_string(w, spdk_scsi_lun_get_name(lun));
1150 
1151 			spdk_json_write_object_end(w);
1152 		}
1153 
1154 		spdk_json_write_array_end(w);
1155 	}
1156 
1157 	spdk_json_write_object_end(w);
1158 }
1159 
1160 SPDK_LOG_REGISTER_TRACE_FLAG("vhost_scsi", SPDK_TRACE_VHOST_SCSI)
1161 SPDK_LOG_REGISTER_TRACE_FLAG("vhost_scsi_queue", SPDK_TRACE_VHOST_SCSI_QUEUE)
1162 SPDK_LOG_REGISTER_TRACE_FLAG("vhost_scsi_data", SPDK_TRACE_VHOST_SCSI_DATA)
1163