xref: /spdk/lib/vhost/vhost_scsi.c (revision 179ed697b3c461d100e675915d074be717b7b9cc)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include <linux/virtio_scsi.h>
37 
38 #include "spdk/env.h"
39 #include "spdk/scsi.h"
40 #include "spdk/scsi_spec.h"
41 #include "spdk/conf.h"
42 #include "spdk/event.h"
43 #include "spdk/util.h"
44 #include "spdk/likely.h"
45 
46 #include "spdk/vhost.h"
47 #include "vhost_internal.h"
48 
49 #include "spdk_internal/assert.h"
50 
/* Features supported by SPDK VHOST lib. */
#define SPDK_VHOST_SCSI_FEATURES	(SPDK_VHOST_FEATURES | \
					(1ULL << VIRTIO_SCSI_F_INOUT) | \
					(1ULL << VIRTIO_SCSI_F_HOTPLUG) | \
					(1ULL << VIRTIO_SCSI_F_CHANGE ) | \
					(1ULL << VIRTIO_SCSI_F_T10_PI ))

/* Features that are specified in VIRTIO SCSI but currently not supported:
 * - Live migration not supported yet
 * - T10 PI
 */
#define SPDK_VHOST_SCSI_DISABLED_FEATURES	(SPDK_VHOST_DISABLED_FEATURES | \
						(1ULL << VIRTIO_SCSI_F_T10_PI ))

/* Period of the management poller (hot-detach / eventq / controlq), in microseconds. */
#define MGMT_POLL_PERIOD_US (1000 * 5)

/* Fixed virtqueue indices per the virtio-scsi layout: queue 0 is the control
 * queue, queue 1 the event queue, and queues 2..n-1 are request queues.
 */
#define VIRTIO_SCSI_CONTROLQ   0
#define VIRTIO_SCSI_EVENTQ   1
#define VIRTIO_SCSI_REQUESTQ   2
70 
/* Per-controller state for a vhost-scsi device. Embeds the generic
 * spdk_vhost_dev as its first member so the two can be cast freely.
 */
struct spdk_vhost_scsi_dev {
	struct spdk_vhost_dev vdev;
	/* SCSI target slots addressable by the guest; NULL means the slot is free. */
	struct spdk_scsi_dev *scsi_dev[SPDK_VHOST_SCSI_CTRLR_MAX_DEVS];
	/* Slot marked for hot-detach; destructed later once it has no pending tasks. */
	bool detached_dev[SPDK_VHOST_SCSI_CTRLR_MAX_DEVS];

	struct spdk_ring *task_pool;		/* pool of free spdk_vhost_scsi_task objects */
	struct spdk_poller *requestq_poller;	/* drives vdev_worker (request queues) */
	struct spdk_poller *mgmt_poller;	/* drives vdev_mgmt_worker (control/event queues) */

	/* Queue of pending hot-attach/hot-detach events, drained by the mgmt poller. */
	struct spdk_ring *vhost_events;
} __rte_cache_aligned;

/* One in-flight request taken from a virtqueue, wrapping an spdk_scsi_task. */
struct spdk_vhost_scsi_task {
	struct spdk_scsi_task	scsi;
	struct iovec iovs[SPDK_VHOST_IOVS_MAX];

	/* Guest-memory response buffer; which member is valid depends on whether
	 * the task came from a request queue (resp) or the control queue (tmf_resp).
	 */
	union {
		struct virtio_scsi_cmd_resp *resp;
		struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	};

	struct spdk_vhost_scsi_dev *svdev;
	struct spdk_scsi_dev *scsi_dev;

	/* Index of the head descriptor of this request in its virtqueue. */
	int req_idx;

	struct rte_vhost_vring *vq;
};

enum spdk_vhost_scsi_event_type {
	SPDK_VHOST_SCSI_EVENT_HOTATTACH,
	SPDK_VHOST_SCSI_EVENT_HOTDETACH,
};

/* Internal hotplug event, allocated by enqueue_vhost_event() and consumed
 * (and freed) by the management poller.
 */
struct spdk_vhost_scsi_event {
	enum spdk_vhost_scsi_event_type type;
	unsigned dev_index;		/* target slot in svdev->scsi_dev[] */
	struct spdk_scsi_dev *dev;
	struct spdk_scsi_lun *lun;	/* NULL when the event applies to the whole device */
};
111 
static int new_device(struct spdk_vhost_dev *);
static int destroy_device(struct spdk_vhost_dev *);
static void spdk_vhost_scsi_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);

/* Callback table the generic vhost layer uses for SCSI controllers. */
const struct spdk_vhost_dev_backend spdk_vhost_scsi_device_backend = {
	.virtio_features = SPDK_VHOST_SCSI_FEATURES,
	.disabled_features = SPDK_VHOST_SCSI_DISABLED_FEATURES,
	.new_device =  new_device,
	.destroy_device = destroy_device,
	.dump_config_json = spdk_vhost_scsi_config_json,
};
123 
static void
spdk_vhost_scsi_task_put(struct spdk_vhost_scsi_task *task)
{
	/* Drop a reference on the embedded SCSI task; when the last reference
	 * is released, spdk_vhost_scsi_task_free_cb() recycles the task.
	 */
	spdk_scsi_task_put(&task->scsi);
}
129 
130 static void
131 spdk_vhost_scsi_task_free_cb(struct spdk_scsi_task *scsi_task)
132 {
133 	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);
134 
135 	assert(task->svdev->vdev.task_cnt > 0);
136 	task->svdev->vdev.task_cnt--;
137 	spdk_ring_enqueue(task->svdev->task_pool, (void **) &task, 1);
138 }
139 
140 static void
141 spdk_vhost_get_tasks(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_scsi_task **tasks,
142 		     size_t count)
143 {
144 	size_t res_count;
145 
146 	res_count = spdk_ring_dequeue(svdev->task_pool, (void **)tasks, count);
147 	if (res_count != count) {
148 		SPDK_ERRLOG("%s: couldn't get %zu tasks from task_pool\n", svdev->vdev.name, count);
149 		/* FIXME: we should never run out of tasks, but what if we do? */
150 		abort();
151 	}
152 
153 	assert(svdev->vdev.task_cnt <= INT_MAX - (int) res_count);
154 	svdev->vdev.task_cnt += res_count;
155 }
156 
/* Apply a hotplug event to the controller's device table and, when desc_ev
 * is non-NULL, fill in the corresponding virtio_scsi_event for the guest.
 * desc_ev may be NULL when no eventq descriptor was available; the internal
 * state change still takes effect in that case.
 */
static void
spdk_vhost_scsi_event_process(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_scsi_event *ev,
			      struct virtio_scsi_event *desc_ev)
{
	int event_id, reason_id;
	int dev_id, lun_id;

	assert(ev->dev);
	assert(ev->dev_index < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
	dev_id = ev->dev_index;

	switch (ev->type) {
	case SPDK_VHOST_SCSI_EVENT_HOTATTACH:
		event_id = VIRTIO_SCSI_T_TRANSPORT_RESET;
		reason_id = VIRTIO_SCSI_EVT_RESET_RESCAN;

		/* Publish the new device in its slot so request processing can see it. */
		spdk_scsi_dev_allocate_io_channels(ev->dev);
		svdev->scsi_dev[dev_id] = ev->dev;
		svdev->detached_dev[dev_id] = false;
		SPDK_NOTICELOG("%s: hot-attached device %d\n", svdev->vdev.name, dev_id);
		break;
	case SPDK_VHOST_SCSI_EVENT_HOTDETACH:
		event_id = VIRTIO_SCSI_T_TRANSPORT_RESET;
		reason_id = VIRTIO_SCSI_EVT_RESET_REMOVED;

		if (ev->lun == NULL) {
			/* Whole-device detach: only mark it here; the mgmt poller
			 * destructs it later once it has no pending tasks
			 * (see process_removed_devs()).
			 */
			svdev->detached_dev[dev_id] = true;
			SPDK_NOTICELOG("%s: marked 'Dev %d' for hot-detach\n", svdev->vdev.name, dev_id);
		} else {
			SPDK_NOTICELOG("%s: hotremoved LUN '%s'\n", svdev->vdev.name, spdk_scsi_lun_get_name(ev->lun));
		}
		break;
	default:
		SPDK_UNREACHABLE();
	}

	/* some events may apply to the entire device via lun id set to 0 */
	lun_id = ev->lun == NULL ? 0 : spdk_scsi_lun_get_id(ev->lun);

	if (desc_ev) {
		/* Encode the 8-byte virtio-scsi LUN address: byte 0 fixed to 1,
		 * byte 1 is the target, bytes 2-3 carry the LUN id.
		 */
		desc_ev->event = event_id;
		desc_ev->lun[0] = 1;
		desc_ev->lun[1] = dev_id;
		desc_ev->lun[2] = lun_id >> 8; /* relies on linux kernel implementation */
		desc_ev->lun[3] = lun_id & 0xFF;
		memset(&desc_ev->lun[4], 0, 4);
		desc_ev->reason = reason_id;
	}
}
206 
/**
 * Process vhost event, send virtio event.
 *
 * Takes one descriptor from the eventq to hold the virtio_scsi_event for the
 * guest. If none is available (or the descriptor is invalid), the internal
 * event is still applied but no virtio event reaches the guest.
 */
static void
process_event(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_scsi_event *ev)
{
	struct vring_desc *desc;
	struct virtio_scsi_event *desc_ev;
	uint32_t req_size;
	uint16_t req;
	struct rte_vhost_vring *vq = &svdev->vdev.virtqueue[VIRTIO_SCSI_EVENTQ];

	if (spdk_vhost_vq_avail_ring_get(vq, &req, 1) != 1) {
		SPDK_ERRLOG("%s: no avail virtio eventq ring entries. virtio event won't be sent.\n",
			    svdev->vdev.name);
		desc = NULL;
		desc_ev = NULL;
		req_size = 0;
		/* even though we can't send virtio event,
		 * the spdk vhost event should still be processed
		 */
	} else {
		desc =  spdk_vhost_vq_get_desc(vq, req);
		desc_ev = spdk_vhost_gpa_to_vva(&svdev->vdev, desc->addr);
		req_size = sizeof(*desc_ev);

		/* Reject descriptors that are too small or fail address translation;
		 * the descriptor is still consumed (completed with 0 bytes used).
		 */
		if (desc->len < sizeof(*desc_ev) || desc_ev == NULL) {
			SPDK_ERRLOG("%s: invalid eventq descriptor.\n", svdev->vdev.name);
			desc_ev = NULL;
			req_size = 0;
		}
	}

	spdk_vhost_scsi_event_process(svdev, ev, desc_ev);

	/* Return the descriptor to the guest only if we actually claimed one. */
	if (desc) {
		spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, req, req_size);
	}
}
246 
247 static void
248 process_eventq(struct spdk_vhost_scsi_dev *svdev)
249 {
250 	struct spdk_vhost_scsi_event *ev;
251 
252 	while (spdk_ring_dequeue(svdev->vhost_events, (void **)&ev, 1) == 1) {
253 		process_event(svdev, ev);
254 		spdk_dma_free(ev);
255 	}
256 }
257 
258 static void
259 process_removed_devs(struct spdk_vhost_scsi_dev *svdev)
260 {
261 	struct spdk_scsi_dev *dev;
262 	int i;
263 
264 	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; ++i) {
265 		dev = svdev->scsi_dev[i];
266 
267 		if (dev && svdev->detached_dev[i] && !spdk_scsi_dev_has_pending_tasks(dev)) {
268 			spdk_scsi_dev_free_io_channels(dev);
269 			spdk_scsi_dev_destruct(dev);
270 			svdev->scsi_dev[i] = NULL;
271 			SPDK_NOTICELOG("%s: hot-detached 'Dev %d'.\n", svdev->vdev.name, i);
272 		}
273 	}
274 }
275 
276 static void
277 enqueue_vhost_event(struct spdk_vhost_scsi_dev *svdev, enum spdk_vhost_scsi_event_type type,
278 		    int dev_index, struct spdk_scsi_dev *dev, struct spdk_scsi_lun *lun)
279 {
280 	struct spdk_vhost_scsi_event *ev;
281 
282 	if (dev == NULL) {
283 		SPDK_ERRLOG("%s: vhost event device cannot be NULL.\n", svdev->vdev.name);
284 		return;
285 	}
286 
287 	ev = spdk_dma_zmalloc(sizeof(*ev), SPDK_CACHE_LINE_SIZE, NULL);
288 	if (ev == NULL) {
289 		SPDK_ERRLOG("%s: failed to alloc vhost event.\n", svdev->vdev.name);
290 		return;
291 	}
292 
293 	ev->type = type;
294 	ev->dev_index = dev_index;
295 	ev->dev = dev;
296 	ev->lun = lun;
297 
298 	if (spdk_ring_enqueue(svdev->vhost_events, (void **)&ev, 1) != 1) {
299 		SPDK_ERRLOG("%s: failed to enqueue vhost event (no room in ring?).\n", svdev->vdev.name);
300 		spdk_dma_free(ev);
301 	}
302 }
303 
static void
submit_completion(struct spdk_vhost_scsi_task *task)
{
	/* Post the finished request to the virtqueue's used ring (reporting
	 * the number of bytes transferred) and release the task.
	 */
	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx,
					task->scsi.data_transferred);
	SPDK_TRACELOG(SPDK_TRACE_VHOST_SCSI, "Finished task (%p) req_idx=%d\n", task, task->req_idx);

	spdk_vhost_scsi_task_put(task);
}
313 
314 static void
315 spdk_vhost_scsi_task_mgmt_cpl(struct spdk_scsi_task *scsi_task)
316 {
317 	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);
318 
319 	submit_completion(task);
320 }
321 
static void
spdk_vhost_scsi_task_cpl(struct spdk_scsi_task *scsi_task)
{
	struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);

	/* The SCSI task has completed.  Do final processing and then post
	   notification to the virtqueue's "used" ring.
	 */
	task->resp->status = task->scsi.status;

	/* Copy sense data to the guest response only on non-GOOD status. */
	if (task->scsi.status != SPDK_SCSI_STATUS_GOOD) {
		memcpy(task->resp->sense, task->scsi.sense_data, task->scsi.sense_data_len);
		task->resp->sense_len = task->scsi.sense_data_len;
	}
	/* Residual = requested transfer length minus what was actually moved. */
	task->resp->resid = task->scsi.transfer_len - task->scsi.data_transferred;

	submit_completion(task);
}
340 
static void
task_submit(struct spdk_vhost_scsi_task *task)
{
	/* Optimistically set the virtio response to OK before queueing; the
	 * SCSI layer reports the actual status via spdk_vhost_scsi_task_cpl().
	 */
	task->resp->response = VIRTIO_SCSI_S_OK;
	spdk_scsi_dev_queue_task(task->scsi_dev, &task->scsi);
}
347 
static void
mgmt_task_submit(struct spdk_vhost_scsi_task *task, enum spdk_scsi_task_func func)
{
	/* Queue a task-management function (e.g. LUN reset); completion is
	 * reported via spdk_vhost_scsi_task_mgmt_cpl().
	 */
	task->tmf_resp->response = VIRTIO_SCSI_S_OK;
	spdk_scsi_dev_queue_mgmt_task(task->scsi_dev, &task->scsi, func);
}
354 
355 static void
356 invalid_request(struct spdk_vhost_scsi_task *task)
357 {
358 	/* Flush eventq so that guest is instantly notified about any hotremoved luns.
359 	 * This might prevent him from sending more invalid requests and trying to reset
360 	 * the device.
361 	 */
362 	process_eventq(task->svdev);
363 	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx, 0);
364 	spdk_vhost_scsi_task_put(task);
365 
366 	SPDK_TRACELOG(SPDK_TRACE_VHOST_SCSI, "Invalid request (status=%" PRIu8")\n",
367 		      task->resp ? task->resp->response : -1);
368 }
369 
/* Resolve the 8-byte virtio-scsi LUN address into task->scsi_dev and
 * task->scsi.lun. Returns 0 on success (including the hot-detached case,
 * where scsi_dev stays NULL so a hotremove sense code can be returned),
 * or -1 for an invalid/unknown target.
 * NOTE(review): only lun[0..3] are read here; assumes the caller provides
 * the full 8-byte LUN field from the request.
 */
static int
spdk_vhost_scsi_task_init_target(struct spdk_vhost_scsi_task *task, const __u8 *lun)
{
	struct spdk_scsi_dev *dev;
	/* LUN id is carried in bytes 2-3 (14 usable bits). */
	uint16_t lun_id = (((uint16_t)lun[2] << 8) | lun[3]) & 0x3FFF;

	SPDK_TRACEDUMP(SPDK_TRACE_VHOST_SCSI_QUEUE, "LUN", lun, 8);

	/* First byte must be 1 and second is target */
	if (lun[0] != 1 || lun[1] >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS)
		return -1;

	dev = task->svdev->scsi_dev[lun[1]];
	task->scsi_dev = dev;
	if (dev == NULL) {
		/* If dev has been hot-detached, return 0 to allow sending additional
		 * scsi hotremove event via sense codes.
		 */
		return task->svdev->detached_dev[lun[1]] ? 0 : -1;
	}

	task->scsi.target_port = spdk_scsi_dev_find_port_by_id(task->scsi_dev, 0);
	task->scsi.lun = spdk_scsi_dev_get_lun(dev, lun_id);
	return 0;
}
395 
/* Handle one control-queue request: a task-management function (TMF) or an
 * asynchronous-notification query/subscribe. Except for LUN reset (which is
 * queued to the SCSI layer and completed asynchronously), the request is
 * completed inline with 0 bytes used.
 */
static void
process_ctrl_request(struct spdk_vhost_scsi_task *task)
{
	struct vring_desc *desc;
	struct virtio_scsi_ctrl_tmf_req *ctrl_req;
	struct virtio_scsi_ctrl_an_resp *an_resp;

	spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_mgmt_cpl, spdk_vhost_scsi_task_free_cb,
				 NULL);
	desc = spdk_vhost_vq_get_desc(task->vq, task->req_idx);
	ctrl_req = spdk_vhost_gpa_to_vva(&task->svdev->vdev, desc->addr);

	SPDK_TRACELOG(SPDK_TRACE_VHOST_SCSI_QUEUE,
		      "Processing controlq descriptor: desc %d/%p, desc_addr %p, len %d, flags %d, last_used_idx %d; kickfd %d; size %d\n",
		      task->req_idx, desc, (void *)desc->addr, desc->len, desc->flags, task->vq->last_used_idx,
		      task->vq->kickfd, task->vq->size);
	SPDK_TRACEDUMP(SPDK_TRACE_VHOST_SCSI_QUEUE, "Request desriptor", (uint8_t *)ctrl_req,
		       desc->len);

	/* Sets task->scsi_dev (possibly NULL for a bad/detached target). */
	spdk_vhost_scsi_task_init_target(task, ctrl_req->lun);

	/* Process the TMF request */
	switch (ctrl_req->type) {
	case VIRTIO_SCSI_T_TMF:
		/* Get the response buffer */
		assert(spdk_vhost_vring_desc_has_next(desc));
		desc = spdk_vhost_vring_desc_get_next(task->vq->desc, desc);
		task->tmf_resp = spdk_vhost_gpa_to_vva(&task->svdev->vdev, desc->addr);

		/* Check if we are processing a valid request */
		if (task->scsi_dev == NULL) {
			task->tmf_resp->response = VIRTIO_SCSI_S_BAD_TARGET;
			break;
		}

		switch (ctrl_req->subtype) {
		case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
			/* Handle LUN reset */
			SPDK_TRACELOG(SPDK_TRACE_VHOST_SCSI_QUEUE, "LUN reset\n");

			/* Completed asynchronously; do not complete inline below. */
			mgmt_task_submit(task, SPDK_SCSI_TASK_FUNC_LUN_RESET);
			return;
		default:
			task->tmf_resp->response = VIRTIO_SCSI_S_ABORTED;
			/* Unsupported command */
			SPDK_TRACELOG(SPDK_TRACE_VHOST_SCSI_QUEUE, "Unsupported TMF command %x\n", ctrl_req->subtype);
			break;
		}
		break;
	case VIRTIO_SCSI_T_AN_QUERY:
	case VIRTIO_SCSI_T_AN_SUBSCRIBE: {
		/* Async notifications are not supported; reject the request.
		 * NOTE(review): unlike the TMF branch, there is no assert that the
		 * descriptor chain actually has a next entry here - verify against
		 * spdk_vhost_vring_desc_get_next's behavior on a chain tail.
		 */
		desc = spdk_vhost_vring_desc_get_next(task->vq->desc, desc);
		an_resp = spdk_vhost_gpa_to_vva(&task->svdev->vdev, desc->addr);
		an_resp->response = VIRTIO_SCSI_S_ABORTED;
		break;
	}
	default:
		SPDK_TRACELOG(SPDK_TRACE_VHOST_SCSI_QUEUE, "Unsupported control command %x\n", ctrl_req->type);
		break;
	}

	spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx, 0);
	spdk_vhost_scsi_task_put(task);
}
460 
/*
 * Process task's descriptor chain and setup data related fields.
 * Maps the guest's descriptor chain into task->iovs, locates the response
 * buffer (task->resp) and determines the transfer direction.
 * Return
 *   -1 if request is invalid and must be aborted,
 *    0 if all data are set.
 */
static int
task_data_setup(struct spdk_vhost_scsi_task *task,
		struct virtio_scsi_cmd_req **req)
{
	struct rte_vhost_vring *vq = task->vq;
	struct spdk_vhost_dev *vdev = &task->svdev->vdev;
	struct vring_desc *desc =  spdk_vhost_vq_get_desc(task->vq, task->req_idx);
	struct iovec *iovs = task->iovs;
	uint16_t iovcnt = 0, iovcnt_max = SPDK_VHOST_IOVS_MAX;
	uint32_t len = 0;

	/* Sanity check. First descriptor must be readable and must have next one. */
	if (spdk_unlikely(spdk_vhost_vring_desc_is_wr(desc) || !spdk_vhost_vring_desc_has_next(desc))) {
		SPDK_WARNLOG("Invalid first (request) descriptor.\n");
		task->resp = NULL;
		goto abort_task;
	}

	spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_cpl, spdk_vhost_scsi_task_free_cb, NULL);
	*req = spdk_vhost_gpa_to_vva(vdev, desc->addr);

	/* Direction is inferred from the second descriptor: device-writable
	 * means the guest expects data FROM the device (a read).
	 */
	desc = spdk_vhost_vring_desc_get_next(vq->desc, desc);
	task->scsi.dxfer_dir = spdk_vhost_vring_desc_is_wr(desc) ? SPDK_SCSI_DIR_FROM_DEV :
			       SPDK_SCSI_DIR_TO_DEV;
	task->scsi.iovs = iovs;

	if (task->scsi.dxfer_dir == SPDK_SCSI_DIR_FROM_DEV) {
		/*
		 * FROM_DEV (READ): [RD_req][WR_resp][WR_buf0]...[WR_bufN]
		 */
		task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
		if (!spdk_vhost_vring_desc_has_next(desc)) {
			/*
			 * TEST UNIT READY command and some others might not contain any payload and this is not an error.
			 */
			SPDK_TRACELOG(SPDK_TRACE_VHOST_SCSI_DATA,
				      "No payload descriptors for FROM DEV command req_idx=%"PRIu16".\n", task->req_idx);
			SPDK_TRACEDUMP(SPDK_TRACE_VHOST_SCSI_DATA, "CDB=", (*req)->cdb, VIRTIO_SCSI_CDB_SIZE);
			task->scsi.iovcnt = 1;
			task->scsi.iovs[0].iov_len = 0;
			task->scsi.length = 0;
			task->scsi.transfer_len = 0;
			return 0;
		}

		desc = spdk_vhost_vring_desc_get_next(vq->desc, desc);

		/* All remaining descriptors are data. */
		while (iovcnt < iovcnt_max) {
			if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
				task->resp = NULL;
				goto abort_task;
			}
			len += desc->len;

			if (!spdk_vhost_vring_desc_has_next(desc))
				break;

			/* Every payload descriptor of a read must be device-writable. */
			desc = spdk_vhost_vring_desc_get_next(vq->desc, desc);
			if (spdk_unlikely(!spdk_vhost_vring_desc_is_wr(desc))) {
				SPDK_WARNLOG("FROM DEV cmd: descriptor nr %" PRIu16" in payload chain is read only.\n", iovcnt);
				task->resp = NULL;
				goto abort_task;
			}
		}
	} else {
		SPDK_TRACELOG(SPDK_TRACE_VHOST_SCSI_DATA, "TO DEV");
		/*
		 * TO_DEV (WRITE):[RD_req][RD_buf0]...[RD_bufN][WR_resp]
		 * No need to check descriptor WR flag as this is done while setting scsi.dxfer_dir.
		 */

		/* Process descriptors up to response. */
		while (!spdk_vhost_vring_desc_is_wr(desc) && iovcnt < iovcnt_max) {
			if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
				task->resp = NULL;
				goto abort_task;
			}
			len += desc->len;

			if (!spdk_vhost_vring_desc_has_next(desc)) {
				SPDK_WARNLOG("TO_DEV cmd: no response descriptor.\n");
				task->resp = NULL;
				goto abort_task;
			}

			desc = spdk_vhost_vring_desc_get_next(vq->desc, desc);
		}

		/* The first device-writable descriptor terminates the payload
		 * and holds the response.
		 */
		task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
		if (spdk_vhost_vring_desc_has_next(desc)) {
			SPDK_WARNLOG("TO_DEV cmd: ignoring unexpected descriptors after response descriptor.\n");
		}
	}

	/* Hitting the iov cap means the chain was longer than we can map. */
	if (iovcnt == iovcnt_max) {
		SPDK_WARNLOG("Too many IO vectors in chain!\n");
		goto abort_task;
	}

	task->scsi.iovcnt = iovcnt;
	task->scsi.length = len;
	task->scsi.transfer_len = len;
	return 0;

abort_task:
	/* task->resp may be NULL when the response buffer was never located. */
	if (task->resp) {
		task->resp->response = VIRTIO_SCSI_S_ABORTED;
	}

	return -1;
}
579 
580 static int
581 process_request(struct spdk_vhost_scsi_task *task)
582 {
583 	struct virtio_scsi_cmd_req *req;
584 	int result;
585 
586 	result = task_data_setup(task, &req);
587 	if (result) {
588 		return result;
589 	}
590 
591 	result = spdk_vhost_scsi_task_init_target(task, req->lun);
592 	if (spdk_unlikely(result != 0)) {
593 		task->resp->response = VIRTIO_SCSI_S_BAD_TARGET;
594 		return -1;
595 	}
596 
597 	task->scsi.cdb = req->cdb;
598 	SPDK_TRACEDUMP(SPDK_TRACE_VHOST_SCSI_DATA, "request CDB", req->cdb, VIRTIO_SCSI_CDB_SIZE);
599 
600 	if (spdk_unlikely(task->scsi.lun == NULL)) {
601 		spdk_scsi_task_process_null_lun(&task->scsi);
602 		task->resp->response = VIRTIO_SCSI_S_OK;
603 		return 1;
604 	}
605 
606 	return 0;
607 }
608 
609 static void
610 process_controlq(struct spdk_vhost_scsi_dev *svdev, struct rte_vhost_vring *vq)
611 {
612 	struct spdk_vhost_scsi_task *tasks[32];
613 	struct spdk_vhost_scsi_task *task;
614 	uint16_t reqs[32];
615 	uint16_t reqs_cnt, i;
616 
617 	reqs_cnt = spdk_vhost_vq_avail_ring_get(vq, reqs, SPDK_COUNTOF(reqs));
618 	spdk_vhost_get_tasks(svdev, tasks, reqs_cnt);
619 	for (i = 0; i < reqs_cnt; i++) {
620 		task = tasks[i];
621 		memset(task, 0, sizeof(*task));
622 		task->vq = vq;
623 		task->svdev = svdev;
624 		task->req_idx = reqs[i];
625 
626 		process_ctrl_request(task);
627 	}
628 }
629 
/* Service every available entry of one request virtqueue: parse each request,
 * then either submit it to the SCSI layer, complete it inline, or abort it.
 */
static void
process_requestq(struct spdk_vhost_scsi_dev *svdev, struct rte_vhost_vring *vq)
{
	struct spdk_vhost_scsi_task *tasks[32];
	struct spdk_vhost_scsi_task *task;
	uint16_t reqs[32];
	uint16_t reqs_cnt, i;
	int result;

	reqs_cnt = spdk_vhost_vq_avail_ring_get(vq, reqs, SPDK_COUNTOF(reqs));
	assert(reqs_cnt <= 32);

	spdk_vhost_get_tasks(svdev, tasks, reqs_cnt);

	for (i = 0; i < reqs_cnt; i++) {
		SPDK_TRACELOG(SPDK_TRACE_VHOST_SCSI, "====== Starting processing request idx %"PRIu16"======\n",
			      reqs[i]);

		task = tasks[i];
		memset(task, 0, sizeof(*task));
		task->vq = vq;
		task->svdev = svdev;
		task->req_idx = reqs[i];
		/* 0: submit to SCSI layer; >0: completed inline; <0: invalid request. */
		result = process_request(task);
		if (likely(result == 0)) {
			task_submit(task);
			SPDK_TRACELOG(SPDK_TRACE_VHOST_SCSI, "====== Task %p req_idx %d submitted ======\n", task,
				      task->req_idx);
		} else if (result > 0) {
			spdk_vhost_scsi_task_cpl(&task->scsi);
			SPDK_TRACELOG(SPDK_TRACE_VHOST_SCSI, "====== Task %p req_idx %d finished early ======\n", task,
				      task->req_idx);
		} else {
			invalid_request(task);
			SPDK_TRACELOG(SPDK_TRACE_VHOST_SCSI, "====== Task %p req_idx %d failed ======\n", task,
				      task->req_idx);
		}
	}
}
669 
/* Management poller body (runs every MGMT_POLL_PERIOD_US): finish pending
 * hot-detaches, flush the hotplug event queue, then service the control queue.
 */
static void
vdev_mgmt_worker(void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;

	process_removed_devs(svdev);
	process_eventq(svdev);
	process_controlq(svdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_CONTROLQ]);
}
679 
680 static void
681 vdev_worker(void *arg)
682 {
683 	struct spdk_vhost_scsi_dev *svdev = arg;
684 	uint32_t q_idx;
685 
686 	for (q_idx = VIRTIO_SCSI_REQUESTQ; q_idx < svdev->vdev.num_queues; q_idx++) {
687 		process_requestq(svdev, &svdev->vdev.virtqueue[q_idx]);
688 	}
689 }
690 
/* Event callback run on the controller's lcore when a guest connects:
 * allocate I/O channels for all present SCSI devices, register guest memory
 * and start the request and management pollers.
 */
static void
add_vdev_cb(void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;
	struct spdk_vhost_dev *vdev = &svdev->vdev;
	uint32_t i;

	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) {
		if (svdev->scsi_dev[i] == NULL) {
			continue;
		}
		spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[i]);
	}
	SPDK_NOTICELOG("Started poller for vhost controller %s on lcore %d\n", vdev->name, vdev->lcore);

	spdk_vhost_dev_mem_register(vdev);

	/* Request poller runs as fast as possible; the mgmt poller is periodic. */
	spdk_poller_register(&svdev->requestq_poller, vdev_worker, svdev, vdev->lcore, 0);
	spdk_poller_register(&svdev->mgmt_poller, vdev_mgmt_worker, svdev, vdev->lcore,
			     MGMT_POLL_PERIOD_US);
}
712 
713 static void
714 remove_vdev_cb(void *arg)
715 {
716 	struct spdk_vhost_scsi_dev *svdev = arg;
717 	uint32_t i;
718 
719 	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) {
720 		if (svdev->scsi_dev[i] == NULL) {
721 			continue;
722 		}
723 		spdk_scsi_dev_free_io_channels(svdev->scsi_dev[i]);
724 	}
725 
726 	SPDK_NOTICELOG("Stopping poller for vhost controller %s\n", svdev->vdev.name);
727 	spdk_vhost_dev_mem_unregister(&svdev->vdev);
728 }
729 
730 static struct spdk_vhost_scsi_dev *
731 to_scsi_dev(struct spdk_vhost_dev *ctrlr)
732 {
733 	if (ctrlr == NULL) {
734 		return NULL;
735 	}
736 
737 	if (ctrlr->type != SPDK_VHOST_DEV_T_SCSI) {
738 		SPDK_ERRLOG("Controller %s: expected SCSI controller (%d) but got %d\n",
739 			    ctrlr->name, SPDK_VHOST_DEV_T_SCSI, ctrlr->type);
740 		return NULL;
741 	}
742 
743 	return (struct spdk_vhost_scsi_dev *)ctrlr;
744 }
745 
746 int
747 spdk_vhost_scsi_dev_construct(const char *name, uint64_t cpumask)
748 {
749 	struct spdk_vhost_scsi_dev *svdev = spdk_dma_zmalloc(sizeof(struct spdk_vhost_scsi_dev),
750 					    SPDK_CACHE_LINE_SIZE, NULL);
751 	int rc;
752 
753 	if (svdev == NULL) {
754 		return -ENOMEM;
755 	}
756 
757 	rc = spdk_vhost_dev_construct(&svdev->vdev, name, cpumask, SPDK_VHOST_DEV_T_SCSI,
758 				      &spdk_vhost_scsi_device_backend);
759 
760 	if (rc) {
761 		spdk_ring_free(svdev->vhost_events);
762 		spdk_dma_free(svdev);
763 		return rc;
764 	}
765 
766 	return 0;
767 }
768 
769 int
770 spdk_vhost_scsi_dev_remove(struct spdk_vhost_dev *vdev)
771 {
772 	struct spdk_vhost_scsi_dev *svdev = to_scsi_dev(vdev);
773 	int i;
774 
775 	if (svdev == NULL) {
776 		return -EINVAL;
777 	}
778 
779 	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; ++i) {
780 		if (svdev->scsi_dev[i]) {
781 			SPDK_ERRLOG("Trying to remove non-empty controller: %s.\n", vdev->name);
782 			return -EBUSY;
783 		}
784 	}
785 
786 	if (spdk_vhost_dev_remove(vdev) != 0) {
787 		return -EIO;
788 	}
789 
790 	spdk_ring_free(svdev->vhost_events);
791 	spdk_dma_free(svdev);
792 	return 0;
793 }
794 
795 struct spdk_scsi_dev *
796 spdk_vhost_scsi_dev_get_dev(struct spdk_vhost_dev *vdev, uint8_t num)
797 {
798 	struct spdk_vhost_scsi_dev *svdev;
799 
800 	assert(num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
801 	svdev = to_scsi_dev(vdev);
802 
803 	return svdev ? svdev->scsi_dev[num] : NULL;
804 }
805 
/* Hotremove callback registered with each spdk_scsi_dev: locate the device's
 * slot on this controller and queue a hot-detach event for the mgmt poller.
 * Ignored (with a warning) when the controller was not negotiated with
 * VIRTIO_SCSI_F_HOTPLUG.
 */
static void
spdk_vhost_scsi_lun_hotremove(const struct spdk_scsi_lun *lun, void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;
	const struct spdk_scsi_dev *scsi_dev;
	unsigned scsi_dev_num;

	assert(lun != NULL);
	assert(svdev != NULL);
	if (!spdk_vhost_dev_has_feature(&svdev->vdev, VIRTIO_SCSI_F_HOTPLUG)) {
		SPDK_WARNLOG("%s: hotremove is not enabled for this controller.\n", svdev->vdev.name);
		return;
	}

	scsi_dev = spdk_scsi_lun_get_dev(lun);

	/* Find which of our slots this SCSI device occupies. */
	for (scsi_dev_num = 0; scsi_dev_num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; ++scsi_dev_num) {
		if (svdev->scsi_dev[scsi_dev_num] == scsi_dev) {
			break;
		}
	}

	if (scsi_dev_num == SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
		SPDK_ERRLOG("%s: '%s' is not a part of this controller.\n", svdev->vdev.name,
			    spdk_scsi_dev_get_name(scsi_dev));
		return;
	}

	enqueue_vhost_event(svdev, SPDK_VHOST_SCSI_EVENT_HOTDETACH, scsi_dev_num,
			    (struct spdk_scsi_dev *) scsi_dev, (struct spdk_scsi_lun *) lun);

	SPDK_NOTICELOG("%s: queued LUN '%s' for hotremove\n", svdev->vdev.name,
		       spdk_scsi_dev_get_name(scsi_dev));
}
840 
841 int
842 spdk_vhost_scsi_dev_add_dev(const char *ctrlr_name, unsigned scsi_dev_num, const char *lun_name)
843 {
844 	struct spdk_vhost_scsi_dev *svdev;
845 	struct spdk_vhost_dev *vdev;
846 	struct spdk_scsi_dev *scsi_dev;
847 	char dev_name[SPDK_SCSI_DEV_MAX_NAME];
848 	int lun_id_list[1];
849 	char *lun_names_list[1];
850 
851 	if (ctrlr_name == NULL) {
852 		SPDK_ERRLOG("No controller name\n");
853 		return -EINVAL;
854 	}
855 
856 	if (scsi_dev_num >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
857 		SPDK_ERRLOG("Controller %d device number too big (max %d)\n", scsi_dev_num,
858 			    SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
859 		return -EINVAL;
860 	}
861 
862 	if (lun_name == NULL) {
863 		SPDK_ERRLOG("No lun name specified \n");
864 		return -EINVAL;
865 	} else if (strlen(lun_name) >= SPDK_SCSI_DEV_MAX_NAME) {
866 		SPDK_ERRLOG("LUN name '%s' too long (max %d).\n", lun_name, SPDK_SCSI_DEV_MAX_NAME - 1);
867 		return -1;
868 	}
869 
870 	vdev = spdk_vhost_dev_find(ctrlr_name);
871 	if (vdev == NULL) {
872 		SPDK_ERRLOG("Controller %s is not defined.\n", ctrlr_name);
873 		return -ENODEV;
874 	}
875 
876 	svdev = to_scsi_dev(vdev);
877 	if (svdev == NULL) {
878 		return -EINVAL;
879 	}
880 
881 	if (vdev->lcore != -1 && !spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
882 		SPDK_ERRLOG("%s: 'Dev %u' is in use and hot-attach is not enabled for this controller\n",
883 			    ctrlr_name, scsi_dev_num);
884 		return -ENOTSUP;
885 	}
886 
887 	if (svdev->scsi_dev[scsi_dev_num] != NULL) {
888 		SPDK_ERRLOG("Controller %s dev %u already occupied\n", ctrlr_name, scsi_dev_num);
889 		return -EEXIST;
890 	}
891 
892 	/*
893 	 * At this stage only one LUN per device
894 	 */
895 	snprintf(dev_name, sizeof(dev_name), "Dev %u", scsi_dev_num);
896 	lun_id_list[0] = 0;
897 	lun_names_list[0] = (char *)lun_name;
898 
899 	scsi_dev = spdk_scsi_dev_construct(dev_name, lun_names_list, lun_id_list, 1,
900 					   SPDK_SPC_PROTOCOL_IDENTIFIER_SAS, spdk_vhost_scsi_lun_hotremove, svdev);
901 	if (scsi_dev == NULL) {
902 		SPDK_ERRLOG("Couldn't create spdk SCSI device '%s' using lun device '%s' in controller: %s\n",
903 			    dev_name, lun_name, vdev->name);
904 		return -EINVAL;
905 	}
906 
907 	spdk_scsi_dev_add_port(scsi_dev, 0, "vhost");
908 
909 	if (vdev->lcore == -1) {
910 		svdev->detached_dev[scsi_dev_num] = false;
911 		svdev->scsi_dev[scsi_dev_num] = scsi_dev;
912 	} else {
913 		enqueue_vhost_event(svdev, SPDK_VHOST_SCSI_EVENT_HOTATTACH, scsi_dev_num, scsi_dev, NULL);
914 	}
915 
916 	SPDK_NOTICELOG("Controller %s: defined device '%s' using lun '%s'\n",
917 		       vdev->name, dev_name, lun_name);
918 	return 0;
919 }
920 
/* Remove the SCSI device in slot 'scsi_dev_num'. If no guest is connected the
 * device is destructed immediately; otherwise a hot-detach event is queued
 * (requires VIRTIO_SCSI_F_HOTPLUG). Returns 0 on success or a negative errno.
 * NOTE(review): vdev->name is dereferenced before any NULL check on vdev;
 * callers are presumably required to pass a valid device - confirm.
 */
int
spdk_vhost_scsi_dev_remove_dev(struct spdk_vhost_dev *vdev, unsigned scsi_dev_num)
{
	struct spdk_vhost_scsi_dev *svdev;
	struct spdk_scsi_dev *scsi_dev;

	if (scsi_dev_num >= SPDK_VHOST_SCSI_CTRLR_MAX_DEVS) {
		SPDK_ERRLOG("%s: invalid device number %d\n", vdev->name, scsi_dev_num);
		return -EINVAL;
	}

	svdev = to_scsi_dev(vdev);
	if (svdev == NULL) {
		return -ENODEV;
	}

	scsi_dev = svdev->scsi_dev[scsi_dev_num];
	if (scsi_dev == NULL) {
		SPDK_ERRLOG("Controller %s dev %u is not occupied\n", vdev->name, scsi_dev_num);
		return -ENODEV;
	}

	if (svdev->vdev.lcore == -1) {
		/* controller is not in use, remove dev and exit */
		spdk_scsi_dev_destruct(scsi_dev);
		svdev->scsi_dev[scsi_dev_num] = NULL;
		SPDK_NOTICELOG("%s: removed device 'Dev %u'\n", vdev->name, scsi_dev_num);
		return 0;
	}

	if (!spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
		SPDK_WARNLOG("%s: 'Dev %u' is in use and hot-detach is not enabled for this controller.\n",
			     svdev->vdev.name, scsi_dev_num);
		return -ENOTSUP;
	}

	enqueue_vhost_event(svdev, SPDK_VHOST_SCSI_EVENT_HOTDETACH, scsi_dev_num, scsi_dev, NULL);

	SPDK_NOTICELOG("%s: queued 'Dev %u' for hot-detach.\n", vdev->name, scsi_dev_num);
	return 0;
}
962 
/* Build vhost-SCSI controllers from the application config file: one
 * controller per [VhostScsiN] section, with one "Dev <num> <lun>" entry per
 * SCSI device. Returns 0 on success, -1 on the first configuration error.
 */
int
spdk_vhost_scsi_controller_construct(void)
{
	struct spdk_conf_section *sp = spdk_conf_first_section(NULL);
	int i, dev_num;
	unsigned ctrlr_num = 0;
	char *lun_name, *dev_num_str;
	char *cpumask_str;
	char *name;
	uint64_t cpumask;

	while (sp != NULL) {
		if (!spdk_conf_section_match_prefix(sp, "VhostScsi")) {
			sp = spdk_conf_next_section(sp);
			continue;
		}

		if (sscanf(spdk_conf_section_get_name(sp), "VhostScsi%u", &ctrlr_num) != 1) {
			SPDK_ERRLOG("Section '%s' has non-numeric suffix.\n",
				    spdk_conf_section_get_name(sp));
			return -1;
		}

		/* NOTE(review): 'name' may be NULL if the section lacks a Name key;
		 * it is passed to error logs and dev_construct below - presumably
		 * validated downstream, confirm.
		 */
		name =  spdk_conf_section_get_val(sp, "Name");
		cpumask_str = spdk_conf_section_get_val(sp, "Cpumask");
		if (cpumask_str == NULL) {
			/* No explicit mask: inherit the application's core mask. */
			cpumask = spdk_app_get_core_mask();
		} else if (spdk_vhost_parse_core_mask(cpumask_str, &cpumask)) {
			SPDK_ERRLOG("%s: Error parsing cpumask '%s' while creating controller\n", name, cpumask_str);
			return -1;
		}

		if (spdk_vhost_scsi_dev_construct(name, cpumask) < 0) {
			return -1;
		}

		/* Each "Dev" line is "<dev number> <lun name>" - exactly one LUN per device. */
		for (i = 0; spdk_conf_section_get_nval(sp, "Dev", i) != NULL; i++) {
			dev_num_str = spdk_conf_section_get_nmval(sp, "Dev", i, 0);
			if (dev_num_str == NULL) {
				SPDK_ERRLOG("%s: Invalid or missing Dev number\n", name);
				return -1;
			}

			dev_num = (int)strtol(dev_num_str, NULL, 10);
			lun_name = spdk_conf_section_get_nmval(sp, "Dev", i, 1);
			if (lun_name == NULL) {
				SPDK_ERRLOG("%s: Invalid or missing LUN name for dev %d\n", name, dev_num);
				return -1;
			} else if (spdk_conf_section_get_nmval(sp, "Dev", i, 2)) {
				SPDK_ERRLOG("%s: Only one LUN per vhost SCSI device supported\n", name);
				return -1;
			}

			if (spdk_vhost_scsi_dev_add_dev(name, dev_num, lun_name) < 0) {
				return -1;
			}
		}

		sp = spdk_conf_next_section(sp);

	}

	return 0;
}
1027 
1028 static void
1029 free_task_pool(struct spdk_vhost_scsi_dev *svdev)
1030 {
1031 	struct spdk_vhost_task *task;
1032 
1033 	if (!svdev->task_pool) {
1034 		return;
1035 	}
1036 
1037 	while (spdk_ring_dequeue(svdev->task_pool, (void **)&task, 1) == 1) {
1038 		spdk_dma_free(task);
1039 	}
1040 
1041 	spdk_ring_free(svdev->task_pool);
1042 	svdev->task_pool = NULL;
1043 }
1044 
1045 static int
1046 alloc_task_pool(struct spdk_vhost_scsi_dev *svdev)
1047 {
1048 	struct spdk_vhost_scsi_task *task;
1049 	uint32_t task_cnt = 0;
1050 	uint32_t ring_size, socket_id;
1051 	uint16_t i;
1052 	int rc;
1053 
1054 	for (i = 0; i < svdev->vdev.num_queues; i++) {
1055 		/*
1056 		 * FIXME:
1057 		 * this is too big because we need only size/2 from each queue but for now
1058 		 * lets leave it as is to be sure we are not mistaken.
1059 		 *
1060 		 * Limit the pool size to 1024 * num_queues. This should be enough as QEMU have the
1061 		 * same hard limit for queue size.
1062 		 */
1063 		task_cnt += spdk_min(svdev->vdev.virtqueue[i].size, 1024);
1064 	}
1065 
1066 	ring_size = spdk_align32pow2(task_cnt + 1);
1067 	socket_id = spdk_env_get_socket_id(svdev->vdev.lcore);
1068 
1069 	svdev->task_pool = spdk_ring_create(SPDK_RING_TYPE_SP_SC, ring_size, socket_id);
1070 	if (svdev->task_pool == NULL) {
1071 		SPDK_ERRLOG("Controller %s: Failed to init vhost scsi task pool\n", svdev->vdev.name);
1072 		return -1;
1073 	}
1074 
1075 	for (i = 0; i < task_cnt; ++i) {
1076 		task = spdk_dma_zmalloc_socket(sizeof(*task), SPDK_CACHE_LINE_SIZE, NULL, socket_id);
1077 		if (task == NULL) {
1078 			SPDK_ERRLOG("Controller %s: Failed to allocate task\n", svdev->vdev.name);
1079 			free_task_pool(svdev);
1080 			return -1;
1081 		}
1082 
1083 		rc = spdk_ring_enqueue(svdev->task_pool, (void **)&task, 1);
1084 		if (rc != 1) {
1085 			SPDK_ERRLOG("Controller %s: Failed to enuqueue %"PRIu32" vhost scsi tasks\n", svdev->vdev.name,
1086 				    task_cnt);
1087 			free_task_pool(svdev);
1088 			return -1;
1089 		}
1090 	}
1091 
1092 	return 0;
1093 }
1094 
1095 /*
1096  * A new device is added to a data core. First the device is added to the main linked list
1097  * and then allocated to a specific data core.
1098  */
1099 static int
1100 new_device(struct spdk_vhost_dev *vdev)
1101 {
1102 	struct spdk_vhost_scsi_dev *svdev;
1103 	int rc;
1104 
1105 	svdev = to_scsi_dev(vdev);
1106 	if (svdev == NULL) {
1107 		SPDK_ERRLOG("Trying to start non-scsi controller as a scsi one.\n");
1108 		return -1;
1109 	}
1110 
1111 	rc = alloc_task_pool(svdev);
1112 	if (rc != 0) {
1113 		SPDK_ERRLOG("%s: failed to alloc task pool.\n", vdev->name);
1114 		return -1;
1115 	}
1116 
1117 	svdev->vhost_events = spdk_ring_create(SPDK_RING_TYPE_MP_SC, 16,
1118 					       spdk_env_get_socket_id(vdev->lcore));
1119 	if (svdev->vhost_events == NULL) {
1120 		SPDK_ERRLOG("%s: failed to alloc event pool.\n", vdev->name);
1121 		return -1;
1122 	}
1123 
1124 	spdk_vhost_timed_event_send(vdev->lcore, add_vdev_cb, svdev, 1, "add scsi vdev");
1125 
1126 	return 0;
1127 }
1128 
/*
 * Vhost stop callback: the guest disconnected from this controller.
 * Tears down in a strict order: stop both pollers, wait for in-flight tasks
 * to drain, detach SCSI devices on the controller's lcore, flush pending
 * management events, then free the event ring and task pool.
 */
static int
destroy_device(struct spdk_vhost_dev *vdev)
{
	struct spdk_vhost_scsi_dev *svdev;
	void *ev;
	struct spdk_vhost_timed_event event = {0};
	uint32_t i;

	svdev = to_scsi_dev(vdev);
	if (svdev == NULL) {
		SPDK_ERRLOG("Trying to stop non-scsi controller as a scsi one.\n");
		return -1;
	}

	/* Unregister each poller on the controller's lcore and block until the
	 * unregistration has completed there, so no new tasks get submitted. */
	spdk_vhost_timed_event_init(&event, vdev->lcore, NULL, NULL, 1);
	spdk_poller_unregister(&svdev->requestq_poller, event.spdk_event);
	spdk_vhost_timed_event_wait(&event, "unregister request queue poller");

	spdk_vhost_timed_event_init(&event, vdev->lcore, NULL, NULL, 1);
	spdk_poller_unregister(&svdev->mgmt_poller, event.spdk_event);
	spdk_vhost_timed_event_wait(&event, "unregister management poller");

	/* Wait for all tasks to finish */
	for (i = 1000; i && vdev->task_cnt > 0; i--) {
		usleep(1000);
	}

	/* Best-effort drain: proceed with teardown even if tasks are stuck,
	 * but leave a trace in the log. */
	if (vdev->task_cnt > 0) {
		SPDK_ERRLOG("%s: pending tasks did not finish in 1s.\n", vdev->name);
	}

	spdk_vhost_timed_event_send(vdev->lcore, remove_vdev_cb, svdev, 1, "remove scsi vdev");

	/* Flush not sent events */
	while (spdk_ring_dequeue(svdev->vhost_events, &ev, 1) == 1) {
		/* process vhost event, but don't send virtio event */
		spdk_vhost_scsi_event_process(svdev, ev, NULL);
		spdk_dma_free(ev);
	}

	spdk_ring_free(svdev->vhost_events);

	free_task_pool(svdev);
	return 0;
}
1174 
/* Library init hook; nothing needs setting up here currently, so it
 * unconditionally reports success. */
int
spdk_vhost_init(void)
{
	return 0;
}
1180 
/* Library teardown hook; no global state to release currently, so it
 * unconditionally reports success. */
int
spdk_vhost_fini(void)
{
	return 0;
}
1186 
1187 static void
1188 spdk_vhost_scsi_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
1189 {
1190 	struct spdk_scsi_dev *sdev;
1191 	struct spdk_scsi_lun *lun;
1192 	uint32_t dev_idx;
1193 	uint32_t lun_idx;
1194 
1195 	assert(vdev != NULL);
1196 	spdk_json_write_name(w, "scsi");
1197 	spdk_json_write_object_begin(w);
1198 	for (dev_idx = 0; dev_idx < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; dev_idx++) {
1199 		sdev = spdk_vhost_scsi_dev_get_dev(vdev, dev_idx);
1200 		if (!sdev) {
1201 			continue;
1202 		}
1203 
1204 		spdk_json_write_name(w, "scsi_dev_num");
1205 		spdk_json_write_uint32(w, dev_idx);
1206 
1207 		spdk_json_write_name(w, "id");
1208 		spdk_json_write_int32(w, spdk_scsi_dev_get_id(sdev));
1209 
1210 		spdk_json_write_name(w, "device_name");
1211 		spdk_json_write_string(w, spdk_scsi_dev_get_name(sdev));
1212 
1213 		spdk_json_write_name(w, "luns");
1214 		spdk_json_write_array_begin(w);
1215 
1216 		for (lun_idx = 0; lun_idx < SPDK_SCSI_DEV_MAX_LUN; lun_idx++) {
1217 			lun = spdk_scsi_dev_get_lun(sdev, lun_idx);
1218 			if (!lun) {
1219 				continue;
1220 			}
1221 
1222 			spdk_json_write_object_begin(w);
1223 
1224 			spdk_json_write_name(w, "id");
1225 			spdk_json_write_int32(w, spdk_scsi_lun_get_id(lun));
1226 
1227 			spdk_json_write_name(w, "name");
1228 			spdk_json_write_string(w, spdk_scsi_lun_get_name(lun));
1229 
1230 			spdk_json_write_object_end(w);
1231 		}
1232 
1233 		spdk_json_write_array_end(w);
1234 	}
1235 
1236 	spdk_json_write_object_end(w);
1237 }
1238 
/* Register this module's debug trace flags so its SPDK_TRACELOG output can
 * be enabled selectively by name. */
SPDK_LOG_REGISTER_TRACE_FLAG("vhost_scsi", SPDK_TRACE_VHOST_SCSI)
SPDK_LOG_REGISTER_TRACE_FLAG("vhost_scsi_queue", SPDK_TRACE_VHOST_SCSI_QUEUE)
SPDK_LOG_REGISTER_TRACE_FLAG("vhost_scsi_data", SPDK_TRACE_VHOST_SCSI_DATA)
1242