/* xref: /spdk/module/bdev/virtio/bdev_virtio_scsi.c (revision 94a84ae98590bea46939eb1dcd7a9876bd393b54) */
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk/bdev.h"
37 #include "spdk/conf.h"
38 #include "spdk/endian.h"
39 #include "spdk/env.h"
40 #include "spdk/thread.h"
41 #include "spdk/scsi_spec.h"
42 #include "spdk/string.h"
43 #include "spdk/util.h"
44 #include "spdk/json.h"
45 
46 #include "spdk/bdev_module.h"
47 #include "spdk_internal/log.h"
48 #include "spdk_internal/virtio.h"
49 #include "spdk_internal/vhost_user.h"
50 
51 #include <linux/virtio_scsi.h>
52 
53 #include "bdev_virtio.h"
54 
55 #define BDEV_VIRTIO_MAX_TARGET 64
56 #define BDEV_VIRTIO_SCAN_PAYLOAD_SIZE 256
57 #define MGMT_POLL_PERIOD_US (1000 * 5)
58 #define CTRLQ_RING_SIZE 16
59 #define SCAN_REQUEST_RETRIES 5
60 
61 /* Number of non-request queues - eventq and controlq */
62 #define SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED 2
63 
64 #define VIRTIO_SCSI_EVENTQ_BUFFER_COUNT 16
65 
66 #define VIRTIO_SCSI_CONTROLQ	0
67 #define VIRTIO_SCSI_EVENTQ	1
68 #define VIRTIO_SCSI_REQUESTQ	2
69 
70 static int bdev_virtio_initialize(void);
71 static void bdev_virtio_finish(void);
72 
73 struct virtio_scsi_dev {
74 	/* Generic virtio device data. */
75 	struct virtio_dev		vdev;
76 
77 	/** Detected SCSI LUNs */
78 	TAILQ_HEAD(, virtio_scsi_disk)	luns;
79 
80 	/** Context for the SCSI target scan. */
81 	struct virtio_scsi_scan_base	*scan_ctx;
82 
83 	/** Controlq poller. */
84 	struct spdk_poller		*mgmt_poller;
85 
86 	/** Controlq messages to be sent. */
87 	struct spdk_ring		*ctrlq_ring;
88 
89 	/** Buffers for the eventq. */
90 	struct virtio_scsi_eventq_io	*eventq_ios;
91 
92 	/** Device marked for removal. */
93 	bool				removed;
94 
95 	/** Callback to be called after vdev removal. */
96 	bdev_virtio_remove_cb		remove_cb;
97 
98 	/** Context for the `remove_cb`. */
99 	void				*remove_ctx;
100 
101 	TAILQ_ENTRY(virtio_scsi_dev) tailq;
102 };
103 
104 struct virtio_scsi_io_ctx {
105 	struct iovec			iov_req;
106 	struct iovec			iov_resp;
107 	union {
108 		struct virtio_scsi_cmd_req req;
109 		struct virtio_scsi_ctrl_tmf_req tmf_req;
110 	};
111 	union {
112 		struct virtio_scsi_cmd_resp resp;
113 		struct virtio_scsi_ctrl_tmf_resp tmf_resp;
114 	};
115 };
116 
117 struct virtio_scsi_eventq_io {
118 	struct iovec			iov;
119 	struct virtio_scsi_event	ev;
120 };
121 
122 struct virtio_scsi_scan_info {
123 	uint64_t			num_blocks;
124 	uint32_t			block_size;
125 	uint8_t				target;
126 	bool				unmap_supported;
127 	TAILQ_ENTRY(virtio_scsi_scan_info) tailq;
128 };
129 
130 struct virtio_scsi_scan_base {
131 	struct virtio_scsi_dev		*svdev;
132 
133 	/** I/O channel used for the scan I/O. */
134 	struct bdev_virtio_io_channel	*channel;
135 
136 	bdev_virtio_create_cb		cb_fn;
137 	void				*cb_arg;
138 
139 	/** Scan all targets on the device. */
140 	bool				full_scan;
141 
142 	/** Start a full rescan after receiving next scan I/O response. */
143 	bool				restart;
144 
145 	/** Additional targets to be (re)scanned. */
146 	TAILQ_HEAD(, virtio_scsi_scan_info) scan_queue;
147 
148 	/** Remaining attempts for sending the current request. */
149 	unsigned                        retries;
150 
151 	/** If set, the last scan I/O needs to be resent */
152 	bool				needs_resend;
153 
154 	struct virtio_scsi_io_ctx	io_ctx;
155 	struct iovec			iov;
156 	uint8_t				payload[BDEV_VIRTIO_SCAN_PAYLOAD_SIZE];
157 
158 	/** Scan results for the current target. */
159 	struct virtio_scsi_scan_info	info;
160 };
161 
162 struct virtio_scsi_disk {
163 	struct spdk_bdev		bdev;
164 	struct virtio_scsi_dev		*svdev;
165 	struct virtio_scsi_scan_info	info;
166 
167 	/** Descriptor opened just to be notified of external bdev hotremove. */
168 	struct spdk_bdev_desc		*notify_desc;
169 
170 	/** Disk marked for removal. */
171 	bool				removed;
172 	TAILQ_ENTRY(virtio_scsi_disk)	link;
173 };
174 
175 struct bdev_virtio_io_channel {
176 	struct virtio_scsi_dev	*svdev;
177 
178 	/** Virtqueue exclusively assigned to this channel. */
179 	struct virtqueue	*vq;
180 
181 	/** Virtio response poller. */
182 	struct spdk_poller	*poller;
183 };
184 
185 static TAILQ_HEAD(, virtio_scsi_dev) g_virtio_scsi_devs =
186 	TAILQ_HEAD_INITIALIZER(g_virtio_scsi_devs);
187 
188 static pthread_mutex_t g_virtio_scsi_mutex = PTHREAD_MUTEX_INITIALIZER;
189 
190 /** Module finish in progress */
191 static bool g_bdev_virtio_finish = false;
192 
193 /* Features desired/implemented by this driver. */
194 #define VIRTIO_SCSI_DEV_SUPPORTED_FEATURES		\
195 	(1ULL << VIRTIO_SCSI_F_INOUT		|	\
196 	 1ULL << VIRTIO_SCSI_F_HOTPLUG		|	\
197 	 1ULL << VIRTIO_RING_F_EVENT_IDX	|	\
198 	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)
199 
200 static void virtio_scsi_dev_unregister_cb(void *io_device);
201 static void virtio_scsi_dev_remove(struct virtio_scsi_dev *svdev,
202 				   bdev_virtio_remove_cb cb_fn, void *cb_arg);
203 static int bdev_virtio_scsi_ch_create_cb(void *io_device, void *ctx_buf);
204 static void bdev_virtio_scsi_ch_destroy_cb(void *io_device, void *ctx_buf);
205 static void process_scan_resp(struct virtio_scsi_scan_base *base);
206 static int bdev_virtio_mgmt_poll(void *arg);
207 
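/*
 * Post a single eventq buffer to the device so it can report hotplug and
 * other asynchronous events through it.
 */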
208 static int
209 virtio_scsi_dev_send_eventq_io(struct virtqueue *vq, struct virtio_scsi_eventq_io *io)
210 {
211 	int rc;
212 
213 	rc = virtqueue_req_start(vq, io, 1);
214 	if (rc != 0) {
215 		return -1;
216 	}
217 
218 	virtqueue_req_add_iovs(vq, &io->iov, 1, SPDK_VIRTIO_DESC_WR);
219 	virtqueue_req_flush(vq);
220 
221 	return 0;
222 }
223 
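/*
 * Common initialization for a newly attached virtio-scsi device: negotiate
 * features, start the virtqueues, acquire the controlq and eventq, pre-post
 * the eventq buffers, register the management poller, register the device
 * as an io_device and add it to the global device list.
 */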
224 static int
225 virtio_scsi_dev_init(struct virtio_scsi_dev *svdev, uint16_t max_queues)
226 {
227 	struct virtio_dev *vdev = &svdev->vdev;
228 	struct spdk_ring *ctrlq_ring;
229 	struct virtio_scsi_eventq_io *eventq_io;
230 	struct virtqueue *eventq;
231 	uint16_t i, num_events;
232 	int rc;
233 
234 	rc = virtio_dev_reset(vdev, VIRTIO_SCSI_DEV_SUPPORTED_FEATURES);
235 	if (rc != 0) {
236 		return rc;
237 	}
238 
239 	rc = virtio_dev_start(vdev, max_queues, SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED);
240 	if (rc != 0) {
241 		return rc;
242 	}
243 
244 	ctrlq_ring = spdk_ring_create(SPDK_RING_TYPE_MP_SC, CTRLQ_RING_SIZE,
245 				      SPDK_ENV_SOCKET_ID_ANY);
246 	if (ctrlq_ring == NULL) {
247 		SPDK_ERRLOG("Failed to allocate send ring for the controlq.\n");
248 		return -1;
249 	}
250 
251 	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_CONTROLQ);
252 	if (rc != 0) {
253 		SPDK_ERRLOG("Failed to acquire the controlq.\n");
254 		spdk_ring_free(ctrlq_ring);
255 		return -1;
256 	}
257 
258 	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_EVENTQ);
259 	if (rc != 0) {
260 		SPDK_ERRLOG("Failed to acquire the eventq.\n");
261 		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
262 		spdk_ring_free(ctrlq_ring);
263 		return -1;
264 	}
265 
266 	eventq = vdev->vqs[VIRTIO_SCSI_EVENTQ];
267 	num_events = spdk_min(eventq->vq_nentries, VIRTIO_SCSI_EVENTQ_BUFFER_COUNT);
268 	svdev->eventq_ios = spdk_zmalloc(sizeof(*svdev->eventq_ios) * num_events,
269 					 0, NULL, SPDK_ENV_LCORE_ID_ANY,
270 					 SPDK_MALLOC_DMA);
271 	if (svdev->eventq_ios == NULL) {
272 		SPDK_ERRLOG("cannot allocate memory for %"PRIu16" eventq buffers\n",
273 			    num_events);
274 		virtio_dev_release_queue(vdev, VIRTIO_SCSI_EVENTQ);
275 		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
276 		spdk_ring_free(ctrlq_ring);
277 		return -1;
278 	}
279 
280 	for (i = 0; i < num_events; i++) {
281 		eventq_io = &svdev->eventq_ios[i];
282 		eventq_io->iov.iov_base = &eventq_io->ev;
283 		eventq_io->iov.iov_len = sizeof(eventq_io->ev);
284 		virtio_scsi_dev_send_eventq_io(eventq, eventq_io);
285 	}
286 
287 	svdev->ctrlq_ring = ctrlq_ring;
288 
289 	svdev->mgmt_poller = spdk_poller_register(bdev_virtio_mgmt_poll, svdev,
290 			     MGMT_POLL_PERIOD_US);
291 
292 	TAILQ_INIT(&svdev->luns);
293 	svdev->scan_ctx = NULL;
294 	svdev->removed = false;
295 	svdev->remove_cb = NULL;
296 	svdev->remove_ctx = NULL;
297 
298 	spdk_io_device_register(svdev, bdev_virtio_scsi_ch_create_cb,
299 				bdev_virtio_scsi_ch_destroy_cb,
300 				sizeof(struct bdev_virtio_io_channel),
301 				svdev->vdev.name);
302 
303 	pthread_mutex_lock(&g_virtio_scsi_mutex);
304 	TAILQ_INSERT_TAIL(&g_virtio_scsi_devs, svdev, tailq);
305 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
306 	return 0;
307 }
308 
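/*
 * Create a virtio-scsi device on top of a virtio-pci transport. A default
 * "VirtioScsiN" name is generated when none is supplied and the number of
 * request queues is read from the device's config space.
 */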
309 static struct virtio_scsi_dev *
310 virtio_pci_scsi_dev_create(const char *name, struct virtio_pci_ctx *pci_ctx)
311 {
	static uint32_t pci_dev_counter = 0;
313 	struct virtio_scsi_dev *svdev;
314 	struct virtio_dev *vdev;
315 	char *default_name = NULL;
316 	uint32_t num_queues;
317 	int rc;
318 
319 	svdev = calloc(1, sizeof(*svdev));
320 	if (svdev == NULL) {
321 		SPDK_ERRLOG("virtio device calloc failed\n");
322 		return NULL;
323 	}
324 
325 	vdev = &svdev->vdev;
326 	if (name == NULL) {
327 		default_name = spdk_sprintf_alloc("VirtioScsi%"PRIu32, pci_dev_counter++);
328 		if (default_name == NULL) {
			free(svdev);
330 			return NULL;
331 		}
332 		name = default_name;
333 	}
334 
335 	rc = virtio_pci_dev_init(vdev, name, pci_ctx);
336 	free(default_name);
337 
338 	if (rc != 0) {
339 		free(svdev);
340 		return NULL;
341 	}
342 
343 	rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_scsi_config, num_queues),
344 					&num_queues, sizeof(num_queues));
345 	if (rc) {
346 		SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc));
347 		virtio_dev_destruct(vdev);
348 		free(svdev);
349 		return NULL;
350 	}
351 
352 	rc = virtio_scsi_dev_init(svdev, num_queues);
353 	if (rc != 0) {
354 		virtio_dev_destruct(vdev);
355 		free(svdev);
356 		return NULL;
357 	}
358 
359 	return svdev;
360 }
361 
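/*
 * Create a virtio-scsi device on top of a vhost-user socket (virtio-user
 * transport).
 */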
362 static struct virtio_scsi_dev *
363 virtio_user_scsi_dev_create(const char *name, const char *path,
364 			    uint16_t num_queues, uint32_t queue_size)
365 {
366 	struct virtio_scsi_dev *svdev;
367 	struct virtio_dev *vdev;
368 	int rc;
369 
370 	svdev = calloc(1, sizeof(*svdev));
371 	if (svdev == NULL) {
372 		SPDK_ERRLOG("calloc failed for virtio device %s: %s\n", name, path);
373 		return NULL;
374 	}
375 
376 	vdev = &svdev->vdev;
377 	rc = virtio_user_dev_init(vdev, name, path, queue_size);
378 	if (rc != 0) {
379 		SPDK_ERRLOG("Failed to create virito device %s: %s\n", name, path);
380 		free(svdev);
381 		return NULL;
382 	}
383 
384 	rc = virtio_scsi_dev_init(svdev, num_queues);
385 	if (rc != 0) {
386 		virtio_dev_destruct(vdev);
387 		free(svdev);
388 		return NULL;
389 	}
390 
391 	return svdev;
392 }
393 
394 static struct virtio_scsi_disk *
395 virtio_scsi_dev_get_disk_by_id(struct virtio_scsi_dev *svdev, uint8_t target_id)
396 {
397 	struct virtio_scsi_disk *disk;
398 
399 	TAILQ_FOREACH(disk, &svdev->luns, link) {
400 		if (disk->info.target == target_id) {
401 			return disk;
402 		}
403 	}
404 
405 	return NULL;
406 }
407 
408 static int virtio_scsi_dev_scan(struct virtio_scsi_dev *svdev,
409 				bdev_virtio_create_cb cb_fn, void *cb_arg);
410 static int send_scan_io(struct virtio_scsi_scan_base *base);
411 static void _virtio_scsi_dev_scan_tgt(struct virtio_scsi_scan_base *base, uint8_t target);
412 static int _virtio_scsi_dev_scan_next(struct virtio_scsi_scan_base *base, int rc);
413 static void _virtio_scsi_dev_scan_finish(struct virtio_scsi_scan_base *base, int errnum);
414 static int virtio_scsi_dev_scan_tgt(struct virtio_scsi_dev *svdev, uint8_t target);
415 
416 static int
417 bdev_virtio_get_ctx_size(void)
418 {
419 	return sizeof(struct virtio_scsi_io_ctx);
420 }
421 
422 static int
423 bdev_virtio_scsi_config_json(struct spdk_json_write_ctx *w)
424 {
425 	struct virtio_scsi_dev *svdev;
426 
427 	pthread_mutex_lock(&g_virtio_scsi_mutex);
428 	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
429 		spdk_json_write_object_begin(w);
430 
431 		spdk_json_write_named_string(w, "method", "bdev_virtio_attach_controller");
432 
433 		spdk_json_write_named_object_begin(w, "params");
434 		spdk_json_write_named_string(w, "name", svdev->vdev.name);
435 		spdk_json_write_named_string(w, "dev_type", "scsi");
436 
437 		/* Write transport specific parameters. */
438 		svdev->vdev.backend_ops->write_json_config(&svdev->vdev, w);
439 
440 		spdk_json_write_object_end(w);
441 
442 		spdk_json_write_object_end(w);
443 
444 	}
445 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
446 
447 	return 0;
448 }
449 
450 
451 static struct spdk_bdev_module virtio_scsi_if = {
452 	.name = "virtio_scsi",
453 	.module_init = bdev_virtio_initialize,
454 	.module_fini = bdev_virtio_finish,
455 	.get_ctx_size = bdev_virtio_get_ctx_size,
456 	.config_json = bdev_virtio_scsi_config_json,
457 	.async_init = true,
458 	.async_fini = true,
459 };
460 
461 SPDK_BDEV_MODULE_REGISTER(virtio_scsi, &virtio_scsi_if)
462 
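/*
 * Prepare the request/response iovecs inside the bdev_io driver context and
 * address the request at the disk's SCSI target.
 */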
463 static struct virtio_scsi_io_ctx *
464 bdev_virtio_init_io_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
465 {
466 	struct virtio_scsi_cmd_req *req;
467 	struct virtio_scsi_cmd_resp *resp;
468 	struct virtio_scsi_disk *disk = (struct virtio_scsi_disk *)bdev_io->bdev;
469 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
470 
471 	req = &io_ctx->req;
472 	resp = &io_ctx->resp;
473 
474 	io_ctx->iov_req.iov_base = req;
475 	io_ctx->iov_req.iov_len = sizeof(*req);
476 
477 	io_ctx->iov_resp.iov_base = resp;
478 	io_ctx->iov_resp.iov_len = sizeof(*resp);
479 
480 	memset(req, 0, sizeof(*req));
481 	req->lun[0] = 1;
482 	req->lun[1] = disk->info.target;
483 
484 	return io_ctx;
485 }
486 
487 static struct virtio_scsi_io_ctx *
488 bdev_virtio_init_tmf_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
489 {
490 	struct virtio_scsi_ctrl_tmf_req *tmf_req;
491 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
492 	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
493 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
494 
495 	tmf_req = &io_ctx->tmf_req;
496 	tmf_resp = &io_ctx->tmf_resp;
497 
498 	io_ctx->iov_req.iov_base = tmf_req;
499 	io_ctx->iov_req.iov_len = sizeof(*tmf_req);
500 	io_ctx->iov_resp.iov_base = tmf_resp;
501 	io_ctx->iov_resp.iov_len = sizeof(*tmf_resp);
502 
503 	memset(tmf_req, 0, sizeof(*tmf_req));
504 	tmf_req->lun[0] = 1;
505 	tmf_req->lun[1] = disk->info.target;
506 
507 	return io_ctx;
508 }
509 
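/*
 * Queue a prepared SCSI command on the channel's request virtqueue. Data
 * buffers are marked device-writable for reads and device-readable for
 * writes.
 */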
510 static void
511 bdev_virtio_send_io(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
512 {
513 	struct bdev_virtio_io_channel *virtio_channel = spdk_io_channel_get_ctx(ch);
514 	struct virtqueue *vq = virtio_channel->vq;
515 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
516 	int rc;
517 
518 	rc = virtqueue_req_start(vq, bdev_io, bdev_io->u.bdev.iovcnt + 2);
519 	if (rc == -ENOMEM) {
520 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
521 		return;
522 	} else if (rc != 0) {
523 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
524 		return;
525 	}
526 
527 	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
528 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
529 		virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
530 		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
531 				       SPDK_VIRTIO_DESC_WR);
532 	} else {
533 		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
534 				       SPDK_VIRTIO_DESC_RO);
535 		virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
536 	}
537 
538 	virtqueue_req_flush(vq);
539 }
540 
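/*
 * Build a READ(10)/WRITE(10) or READ(16)/WRITE(16) CDB, depending on whether
 * the disk requires 64-bit LBAs, and submit it.
 */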
541 static void
542 bdev_virtio_rw(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
543 {
544 	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
545 	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_io_vreq(ch, bdev_io);
546 	struct virtio_scsi_cmd_req *req = &io_ctx->req;
547 	bool is_write = bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE;
548 
549 	if (disk->info.num_blocks > (1ULL << 32)) {
550 		req->cdb[0] = is_write ? SPDK_SBC_WRITE_16 : SPDK_SBC_READ_16;
551 		to_be64(&req->cdb[2], bdev_io->u.bdev.offset_blocks);
552 		to_be32(&req->cdb[10], bdev_io->u.bdev.num_blocks);
553 	} else {
554 		req->cdb[0] = is_write ? SPDK_SBC_WRITE_10 : SPDK_SBC_READ_10;
555 		to_be32(&req->cdb[2], bdev_io->u.bdev.offset_blocks);
556 		to_be16(&req->cdb[7], bdev_io->u.bdev.num_blocks);
557 	}
558 
559 	bdev_virtio_send_io(ch, bdev_io);
560 }
561 
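/*
 * Handle a bdev reset by queueing a LOGICAL UNIT RESET task management
 * request on the controlq send ring; the management poller submits it to
 * the device.
 */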
562 static void
563 bdev_virtio_reset(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
564 {
565 	struct bdev_virtio_io_channel *virtio_ch = spdk_io_channel_get_ctx(ch);
566 	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_tmf_vreq(ch, bdev_io);
567 	struct virtio_scsi_ctrl_tmf_req *tmf_req = &io_ctx->tmf_req;
568 	struct virtio_scsi_dev *svdev = virtio_ch->svdev;
569 	size_t enqueued_count;
570 
571 	tmf_req->type = VIRTIO_SCSI_T_TMF;
572 	tmf_req->subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;
573 
574 	enqueued_count = spdk_ring_enqueue(svdev->ctrlq_ring, (void **)&bdev_io, 1, NULL);
575 	if (spdk_likely(enqueued_count == 1)) {
576 		return;
577 	} else {
578 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
579 	}
580 }
581 
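/*
 * Translate a bdev UNMAP request into a SCSI UNMAP command. The parameter
 * list is built in the provided buffer; ranges longer than UINT32_MAX blocks
 * are split across multiple block descriptors.
 */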
582 static void
583 bdev_virtio_unmap(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
584 {
585 	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_io_vreq(ch, bdev_io);
586 	struct virtio_scsi_cmd_req *req = &io_ctx->req;
587 	struct spdk_scsi_unmap_bdesc *desc, *first_desc;
588 	uint8_t *buf;
589 	uint64_t offset_blocks, num_blocks;
590 	uint16_t cmd_len;
591 
592 	if (!success) {
593 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
594 		return;
595 	}
596 
597 	buf = bdev_io->u.bdev.iovs[0].iov_base;
598 
599 	offset_blocks = bdev_io->u.bdev.offset_blocks;
600 	num_blocks = bdev_io->u.bdev.num_blocks;
601 
602 	/* (n-1) * 16-byte descriptors */
603 	first_desc = desc = (struct spdk_scsi_unmap_bdesc *)&buf[8];
604 	while (num_blocks > UINT32_MAX) {
605 		to_be64(&desc->lba, offset_blocks);
606 		to_be32(&desc->block_count, UINT32_MAX);
607 		memset(&desc->reserved, 0, sizeof(desc->reserved));
608 		offset_blocks += UINT32_MAX;
609 		num_blocks -= UINT32_MAX;
610 		desc++;
611 	}
612 
613 	/* The last descriptor with block_count <= UINT32_MAX */
614 	to_be64(&desc->lba, offset_blocks);
615 	to_be32(&desc->block_count, num_blocks);
616 	memset(&desc->reserved, 0, sizeof(desc->reserved));
617 
618 	/* 8-byte header + n * 16-byte block descriptor */
619 	cmd_len = 8 + (desc - first_desc + 1) *  sizeof(struct spdk_scsi_unmap_bdesc);
620 
621 	req->cdb[0] = SPDK_SBC_UNMAP;
622 	to_be16(&req->cdb[7], cmd_len);
623 
624 	/* 8-byte header */
625 	to_be16(&buf[0], cmd_len - 2); /* total length (excluding the length field) */
626 	to_be16(&buf[2], cmd_len - 8); /* length of block descriptors */
627 	memset(&buf[4], 0, 4); /* reserved */
628 
629 	bdev_virtio_send_io(ch, bdev_io);
630 }
631 
632 static void
633 bdev_virtio_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
634 		       bool success)
635 {
636 	if (!success) {
637 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
638 		return;
639 	}
640 
641 	bdev_virtio_rw(ch, bdev_io);
642 }
643 
644 static int _bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
645 {
646 	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
647 
648 	switch (bdev_io->type) {
649 	case SPDK_BDEV_IO_TYPE_READ:
650 		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_get_buf_cb,
651 				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
652 		return 0;
653 	case SPDK_BDEV_IO_TYPE_WRITE:
654 		bdev_virtio_rw(ch, bdev_io);
655 		return 0;
656 	case SPDK_BDEV_IO_TYPE_RESET:
657 		bdev_virtio_reset(ch, bdev_io);
658 		return 0;
659 	case SPDK_BDEV_IO_TYPE_UNMAP: {
660 		uint64_t buf_len = 8 /* header size */ +
661 				   (bdev_io->u.bdev.num_blocks + UINT32_MAX - 1) /
662 				   UINT32_MAX * sizeof(struct spdk_scsi_unmap_bdesc);
663 
664 		if (!disk->info.unmap_supported) {
665 			return -1;
666 		}
667 
668 		if (buf_len > SPDK_BDEV_LARGE_BUF_MAX_SIZE) {
669 			SPDK_ERRLOG("Trying to UNMAP too many blocks: %"PRIu64"\n",
670 				    bdev_io->u.bdev.num_blocks);
671 			return -1;
672 		}
673 		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_unmap, buf_len);
674 		return 0;
675 	}
676 	case SPDK_BDEV_IO_TYPE_FLUSH:
677 	default:
678 		return -1;
679 	}
680 	return 0;
681 }
682 
683 static void bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
684 {
685 	if (_bdev_virtio_submit_request(ch, bdev_io) < 0) {
686 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
687 	}
688 }
689 
690 static bool
691 bdev_virtio_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
692 {
693 	struct virtio_scsi_disk *disk = ctx;
694 
695 	switch (io_type) {
696 	case SPDK_BDEV_IO_TYPE_READ:
697 	case SPDK_BDEV_IO_TYPE_WRITE:
698 	case SPDK_BDEV_IO_TYPE_FLUSH:
699 	case SPDK_BDEV_IO_TYPE_RESET:
700 		return true;
701 
702 	case SPDK_BDEV_IO_TYPE_UNMAP:
703 		return disk->info.unmap_supported;
704 
705 	default:
706 		return false;
707 	}
708 }
709 
710 static struct spdk_io_channel *
711 bdev_virtio_get_io_channel(void *ctx)
712 {
713 	struct virtio_scsi_disk *disk = ctx;
714 
715 	return spdk_get_io_channel(disk->svdev);
716 }
717 
718 static int
719 bdev_virtio_disk_destruct(void *ctx)
720 {
721 	struct virtio_scsi_disk *disk = ctx;
722 	struct virtio_scsi_dev *svdev = disk->svdev;
723 
724 	TAILQ_REMOVE(&svdev->luns, disk, link);
725 	free(disk->bdev.name);
726 	free(disk);
727 
728 	if (svdev->removed && TAILQ_EMPTY(&svdev->luns)) {
729 		spdk_io_device_unregister(svdev, virtio_scsi_dev_unregister_cb);
730 	}
731 
732 	return 0;
733 }
734 
735 static int
736 bdev_virtio_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
737 {
738 	struct virtio_scsi_disk *disk = ctx;
739 
740 	virtio_dev_dump_json_info(&disk->svdev->vdev, w);
741 	return 0;
742 }
743 
744 static void
745 bdev_virtio_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
746 {
	/* SCSI targets and LUNs are discovered during the scan process, so there
	 * is nothing to save here.
	 */
750 }
751 
752 static const struct spdk_bdev_fn_table virtio_fn_table = {
753 	.destruct		= bdev_virtio_disk_destruct,
754 	.submit_request		= bdev_virtio_submit_request,
755 	.io_type_supported	= bdev_virtio_io_type_supported,
756 	.get_io_channel		= bdev_virtio_get_io_channel,
757 	.dump_info_json		= bdev_virtio_dump_info_json,
758 	.write_config_json	= bdev_virtio_write_config_json,
759 };
760 
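/* Extract the sense key, ASC and ASCQ from fixed-format sense data, if present. */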
761 static void
762 get_scsi_status(struct virtio_scsi_cmd_resp *resp, int *sk, int *asc, int *ascq)
763 {
764 	/* see spdk_scsi_task_build_sense_data() for sense data details */
765 	*sk = 0;
766 	*asc = 0;
767 	*ascq = 0;
768 
769 	if (resp->sense_len < 3) {
770 		return;
771 	}
772 
773 	*sk = resp->sense[2] & 0xf;
774 
775 	if (resp->sense_len < 13) {
776 		return;
777 	}
778 
779 	*asc = resp->sense[12];
780 
781 	if (resp->sense_len < 14) {
782 		return;
783 	}
784 
785 	*ascq = resp->sense[13];
786 }
787 
788 static void
789 bdev_virtio_io_cpl(struct spdk_bdev_io *bdev_io)
790 {
791 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
792 	int sk, asc, ascq;
793 
794 	get_scsi_status(&io_ctx->resp, &sk, &asc, &ascq);
795 	spdk_bdev_io_complete_scsi_status(bdev_io, io_ctx->resp.status, sk, asc, ascq);
796 }
797 
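/*
 * Per-channel poller: reap completions from the request virtqueue. Scan I/Os
 * are routed back into the target-scan state machine (and resent if a
 * previous submission failed); everything else completes as a regular bdev I/O.
 */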
798 static int
799 bdev_virtio_poll(void *arg)
800 {
801 	struct bdev_virtio_io_channel *ch = arg;
802 	struct virtio_scsi_dev *svdev = ch->svdev;
803 	struct virtio_scsi_scan_base *scan_ctx = svdev->scan_ctx;
804 	void *io[32];
805 	uint32_t io_len[32];
806 	uint16_t i, cnt;
807 	int rc;
808 
809 	cnt = virtio_recv_pkts(ch->vq, (void **)io, io_len, SPDK_COUNTOF(io));
810 	for (i = 0; i < cnt; ++i) {
811 		if (spdk_unlikely(scan_ctx && io[i] == &scan_ctx->io_ctx)) {
812 			if (svdev->removed) {
813 				_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
814 				return -1;
815 			}
816 
817 			if (scan_ctx->restart) {
818 				scan_ctx->restart = false;
819 				scan_ctx->full_scan = true;
820 				_virtio_scsi_dev_scan_tgt(scan_ctx, 0);
821 				continue;
822 			}
823 
824 			process_scan_resp(scan_ctx);
825 			continue;
826 		}
827 
828 		bdev_virtio_io_cpl(io[i]);
829 	}
830 
831 	if (spdk_unlikely(scan_ctx && scan_ctx->needs_resend)) {
832 		if (svdev->removed) {
833 			_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
834 			return -1;
835 		} else if (cnt == 0) {
836 			return 0;
837 		}
838 
839 		rc = send_scan_io(scan_ctx);
840 		if (rc != 0) {
841 			assert(scan_ctx->retries > 0);
842 			scan_ctx->retries--;
843 			if (scan_ctx->retries == 0) {
844 				SPDK_ERRLOG("Target scan failed unrecoverably with rc = %d.\n", rc);
845 				_virtio_scsi_dev_scan_finish(scan_ctx, rc);
846 			}
847 		}
848 	}
849 
850 	return cnt;
851 }
852 
853 static void
854 bdev_virtio_tmf_cpl_cb(void *ctx)
855 {
856 	struct spdk_bdev_io *bdev_io = ctx;
857 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
858 
859 	if (io_ctx->tmf_resp.response == VIRTIO_SCSI_S_OK) {
860 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
861 	} else {
862 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
863 	}
864 }
865 
866 static void
867 bdev_virtio_tmf_cpl(struct spdk_bdev_io *bdev_io)
868 {
869 	spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io), bdev_virtio_tmf_cpl_cb, bdev_io);
870 }
871 
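/*
 * Process a single eventq completion: start a full rescan if events were
 * missed, rescan or unregister a single target on transport reset events,
 * and repost the buffer to the eventq.
 */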
872 static void
873 bdev_virtio_eventq_io_cpl(struct virtio_scsi_dev *svdev, struct virtio_scsi_eventq_io *io)
874 {
875 	struct virtio_scsi_event *ev = &io->ev;
876 	struct virtio_scsi_disk *disk;
877 
878 	if (ev->lun[0] != 1) {
879 		SPDK_WARNLOG("Received an event with invalid data layout.\n");
880 		goto out;
881 	}
882 
883 	if (ev->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
884 		ev->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
885 		virtio_scsi_dev_scan(svdev, NULL, NULL);
886 	}
887 
888 	switch (ev->event) {
889 	case VIRTIO_SCSI_T_NO_EVENT:
890 		break;
891 	case VIRTIO_SCSI_T_TRANSPORT_RESET:
892 		switch (ev->reason) {
893 		case VIRTIO_SCSI_EVT_RESET_RESCAN:
894 			virtio_scsi_dev_scan_tgt(svdev, ev->lun[1]);
895 			break;
896 		case VIRTIO_SCSI_EVT_RESET_REMOVED:
897 			disk = virtio_scsi_dev_get_disk_by_id(svdev, ev->lun[1]);
898 			if (disk != NULL) {
899 				spdk_bdev_unregister(&disk->bdev, NULL, NULL);
900 			}
901 			break;
902 		default:
903 			break;
904 		}
905 		break;
906 	default:
907 		break;
908 	}
909 
910 out:
911 	virtio_scsi_dev_send_eventq_io(svdev->vdev.vqs[VIRTIO_SCSI_EVENTQ], io);
912 }
913 
914 static void
915 bdev_virtio_tmf_abort_nomem_cb(void *ctx)
916 {
917 	struct spdk_bdev_io *bdev_io = ctx;
918 
919 	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
920 }
921 
922 static void
923 bdev_virtio_tmf_abort_ioerr_cb(void *ctx)
924 {
925 	struct spdk_bdev_io *bdev_io = ctx;
926 
927 	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
928 }
929 
930 static void
931 bdev_virtio_tmf_abort(struct spdk_bdev_io *bdev_io, int status)
932 {
933 	spdk_msg_fn fn;
934 
935 	if (status == -ENOMEM) {
936 		fn = bdev_virtio_tmf_abort_nomem_cb;
937 	} else {
938 		fn = bdev_virtio_tmf_abort_ioerr_cb;
939 	}
940 
941 	spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io), fn, bdev_io);
942 }
943 
944 static int
945 bdev_virtio_send_tmf_io(struct virtqueue *ctrlq, struct spdk_bdev_io *bdev_io)
946 {
947 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
948 	int rc;
949 
950 	rc = virtqueue_req_start(ctrlq, bdev_io, 2);
951 	if (rc != 0) {
952 		return rc;
953 	}
954 
955 	virtqueue_req_add_iovs(ctrlq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
956 	virtqueue_req_add_iovs(ctrlq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
957 
958 	virtqueue_req_flush(ctrlq);
959 	return 0;
960 }
961 
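/*
 * Management poller: submit queued TMF requests to the controlq, reap their
 * completions and drain the eventq.
 */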
962 static int
963 bdev_virtio_mgmt_poll(void *arg)
964 {
965 	struct virtio_scsi_dev *svdev = arg;
966 	struct virtio_dev *vdev = &svdev->vdev;
967 	struct virtqueue *eventq = vdev->vqs[VIRTIO_SCSI_EVENTQ];
968 	struct virtqueue *ctrlq = vdev->vqs[VIRTIO_SCSI_CONTROLQ];
969 	struct spdk_ring *send_ring = svdev->ctrlq_ring;
970 	void *io[16];
971 	uint32_t io_len[16];
972 	uint16_t i, cnt;
973 	int rc;
974 	int total = 0;
975 
976 	cnt = spdk_ring_dequeue(send_ring, io, SPDK_COUNTOF(io));
977 	total += cnt;
978 	for (i = 0; i < cnt; ++i) {
979 		rc = bdev_virtio_send_tmf_io(ctrlq, io[i]);
980 		if (rc != 0) {
981 			bdev_virtio_tmf_abort(io[i], rc);
982 		}
983 	}
984 
985 	cnt = virtio_recv_pkts(ctrlq, io, io_len, SPDK_COUNTOF(io));
986 	total += cnt;
987 	for (i = 0; i < cnt; ++i) {
988 		bdev_virtio_tmf_cpl(io[i]);
989 	}
990 
991 	cnt = virtio_recv_pkts(eventq, io, io_len, SPDK_COUNTOF(io));
992 	total += cnt;
993 	for (i = 0; i < cnt; ++i) {
994 		bdev_virtio_eventq_io_cpl(svdev, io[i]);
995 	}
996 
997 	return total;
998 }
999 
1000 static int
1001 bdev_virtio_scsi_ch_create_cb(void *io_device, void *ctx_buf)
1002 {
1003 	struct virtio_scsi_dev *svdev = io_device;
1004 	struct virtio_dev *vdev = &svdev->vdev;
1005 	struct bdev_virtio_io_channel *ch = ctx_buf;
1006 	struct virtqueue *vq;
1007 	int32_t queue_idx;
1008 
1009 	queue_idx = virtio_dev_find_and_acquire_queue(vdev, VIRTIO_SCSI_REQUESTQ);
1010 	if (queue_idx < 0) {
1011 		SPDK_ERRLOG("Couldn't get an unused queue for the io_channel.\n");
1012 		return -1;
1013 	}
1014 
1015 	vq = vdev->vqs[queue_idx];
1016 
1017 	ch->svdev = svdev;
1018 	ch->vq = vq;
1019 
1020 	ch->poller = spdk_poller_register(bdev_virtio_poll, ch, 0);
1021 
1022 	return 0;
1023 }
1024 
1025 static void
1026 bdev_virtio_scsi_ch_destroy_cb(void *io_device, void *ctx_buf)
1027 {
1028 	struct bdev_virtio_io_channel *ch = ctx_buf;
1029 	struct virtio_scsi_dev *svdev = ch->svdev;
1030 	struct virtio_dev *vdev = &svdev->vdev;
1031 	struct virtqueue *vq = ch->vq;
1032 
1033 	spdk_poller_unregister(&ch->poller);
1034 	virtio_dev_release_queue(vdev, vq->vq_queue_index);
1035 }
1036 
1037 static void
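/*
 * Complete the target scan: release the scan channel, drop any queued
 * per-target requests and, if a callback was provided, report the
 * discovered bdevs (on success) to the caller.
 */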
1038 _virtio_scsi_dev_scan_finish(struct virtio_scsi_scan_base *base, int errnum)
1039 {
1040 	struct virtio_scsi_dev *svdev = base->svdev;
1041 	size_t bdevs_cnt;
1042 	struct spdk_bdev *bdevs[BDEV_VIRTIO_MAX_TARGET];
1043 	struct virtio_scsi_disk *disk;
1044 	struct virtio_scsi_scan_info *tgt, *next_tgt;
1045 
1046 	spdk_put_io_channel(spdk_io_channel_from_ctx(base->channel));
1047 	base->svdev->scan_ctx = NULL;
1048 
1049 	TAILQ_FOREACH_SAFE(tgt, &base->scan_queue, tailq, next_tgt) {
1050 		TAILQ_REMOVE(&base->scan_queue, tgt, tailq);
1051 		free(tgt);
1052 	}
1053 
1054 	if (base->cb_fn == NULL) {
1055 		spdk_free(base);
1056 		return;
1057 	}
1058 
1059 	bdevs_cnt = 0;
1060 	if (errnum == 0) {
1061 		TAILQ_FOREACH(disk, &svdev->luns, link) {
1062 			bdevs[bdevs_cnt] = &disk->bdev;
1063 			bdevs_cnt++;
1064 		}
1065 	}
1066 
1067 	base->cb_fn(base->cb_arg, errnum, bdevs, bdevs_cnt);
1068 	spdk_free(base);
1069 }
1070 
1071 static int
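/*
 * Submit the current scan request on the scan channel's virtqueue. On
 * failure the request is flagged for resend by the response poller.
 */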
1072 send_scan_io(struct virtio_scsi_scan_base *base)
1073 {
1074 	struct virtio_scsi_io_ctx *io_ctx = &base->io_ctx;
1075 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1076 	struct virtqueue *vq = base->channel->vq;
1077 	int payload_iov_cnt = base->iov.iov_len > 0 ? 1 : 0;
1078 	int rc;
1079 
1080 	req->lun[0] = 1;
1081 	req->lun[1] = base->info.target;
1082 
1083 	rc = virtqueue_req_start(vq, io_ctx, 2 + payload_iov_cnt);
1084 	if (rc != 0) {
1085 		base->needs_resend = true;
1086 		return -1;
1087 	}
1088 
1089 	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
1090 	virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
1091 	virtqueue_req_add_iovs(vq, &base->iov, payload_iov_cnt, SPDK_VIRTIO_DESC_WR);
1092 
1093 	virtqueue_req_flush(vq);
1094 	return 0;
1095 }
1096 
1097 static int
1098 send_inquiry(struct virtio_scsi_scan_base *base)
1099 {
1100 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1101 	struct spdk_scsi_cdb_inquiry *cdb;
1102 
1103 	memset(req, 0, sizeof(*req));
1104 
1105 	base->iov.iov_len = BDEV_VIRTIO_SCAN_PAYLOAD_SIZE;
1106 	cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;
1107 	cdb->opcode = SPDK_SPC_INQUIRY;
1108 	to_be16(cdb->alloc_len, BDEV_VIRTIO_SCAN_PAYLOAD_SIZE);
1109 
1110 	return send_scan_io(base);
1111 }
1112 
1113 static int
1114 send_inquiry_vpd(struct virtio_scsi_scan_base *base, uint8_t page_code)
1115 {
1116 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1117 	struct spdk_scsi_cdb_inquiry *inquiry_cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;
1118 
1119 	memset(req, 0, sizeof(*req));
1120 
1121 	base->iov.iov_len = BDEV_VIRTIO_SCAN_PAYLOAD_SIZE;
1122 	inquiry_cdb->opcode = SPDK_SPC_INQUIRY;
1123 	inquiry_cdb->evpd = 1;
1124 	inquiry_cdb->page_code = page_code;
1125 	to_be16(inquiry_cdb->alloc_len, base->iov.iov_len);
1126 
1127 	return send_scan_io(base);
1128 }
1129 
1130 static int
1131 send_read_cap_10(struct virtio_scsi_scan_base *base)
1132 {
1133 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1134 
1135 	memset(req, 0, sizeof(*req));
1136 
1137 	base->iov.iov_len = 8;
1138 	req->cdb[0] = SPDK_SBC_READ_CAPACITY_10;
1139 
1140 	return send_scan_io(base);
1141 }
1142 
1143 static int
1144 send_read_cap_16(struct virtio_scsi_scan_base *base)
1145 {
1146 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1147 
1148 	memset(req, 0, sizeof(*req));
1149 
1150 	base->iov.iov_len = 32;
1151 	req->cdb[0] = SPDK_SPC_SERVICE_ACTION_IN_16;
1152 	req->cdb[1] = SPDK_SBC_SAI_READ_CAPACITY_16;
1153 	to_be32(&req->cdb[10], base->iov.iov_len);
1154 
1155 	return send_scan_io(base);
1156 }
1157 
1158 static int
1159 send_test_unit_ready(struct virtio_scsi_scan_base *base)
1160 {
1161 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1162 
1163 	memset(req, 0, sizeof(*req));
1164 	req->cdb[0] = SPDK_SPC_TEST_UNIT_READY;
1165 	base->iov.iov_len = 0;
1166 
1167 	return send_scan_io(base);
1168 }
1169 
1170 static int
1171 send_start_stop_unit(struct virtio_scsi_scan_base *base)
1172 {
1173 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1174 
1175 	memset(req, 0, sizeof(*req));
1176 	req->cdb[0] = SPDK_SBC_START_STOP_UNIT;
1177 	req->cdb[4] = SPDK_SBC_START_STOP_UNIT_START_BIT;
1178 	base->iov.iov_len = 0;
1179 
1180 	return send_scan_io(base);
1181 }
1182 
1183 static int
1184 process_scan_start_stop_unit(struct virtio_scsi_scan_base *base)
1185 {
1186 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1187 
1188 	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
1189 		return send_inquiry_vpd(base, SPDK_SPC_VPD_SUPPORTED_VPD_PAGES);
1190 	}
1191 
1192 	return -1;
1193 }
1194 
1195 static int
1196 process_scan_test_unit_ready(struct virtio_scsi_scan_base *base)
1197 {
1198 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1199 	int sk, asc, ascq;
1200 
1201 	get_scsi_status(resp, &sk, &asc, &ascq);
1202 
	/* Check the response: query VPD pages if the unit is spun up, otherwise send START STOP UNIT. */
1204 	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
1205 		return send_inquiry_vpd(base, SPDK_SPC_VPD_SUPPORTED_VPD_PAGES);
1206 	} else if (resp->response == VIRTIO_SCSI_S_OK &&
1207 		   resp->status == SPDK_SCSI_STATUS_CHECK_CONDITION &&
1208 		   sk == SPDK_SCSI_SENSE_UNIT_ATTENTION &&
1209 		   asc == SPDK_SCSI_ASC_LOGICAL_UNIT_NOT_READY) {
1210 		return send_start_stop_unit(base);
1211 	} else {
1212 		return -1;
1213 	}
1214 }
1215 
1216 static int
1217 process_scan_inquiry_standard(struct virtio_scsi_scan_base *base)
1218 {
1219 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1220 	struct spdk_scsi_cdb_inquiry_data *inquiry_data =
1221 		(struct spdk_scsi_cdb_inquiry_data *)base->payload;
1222 
1223 	if (resp->status != SPDK_SCSI_STATUS_GOOD) {
1224 		return -1;
1225 	}
1226 
	/* Check to make sure it's a supported device. */
1228 	if (inquiry_data->peripheral_device_type != SPDK_SPC_PERIPHERAL_DEVICE_TYPE_DISK ||
1229 	    inquiry_data->peripheral_qualifier != SPDK_SPC_PERIPHERAL_QUALIFIER_CONNECTED) {
1230 		SPDK_WARNLOG("Unsupported peripheral device type 0x%02x (qualifier 0x%02x)\n",
1231 			     inquiry_data->peripheral_device_type,
1232 			     inquiry_data->peripheral_qualifier);
1233 		return -1;
1234 	}
1235 
1236 	return send_test_unit_ready(base);
1237 }
1238 
1239 static int
1240 process_scan_inquiry_vpd_supported_vpd_pages(struct virtio_scsi_scan_base *base)
1241 {
1242 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1243 	bool block_provisioning_page_supported = false;
1244 
1245 	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
1246 		const uint8_t *vpd_data = base->payload;
1247 		const uint8_t *supported_vpd_pages = vpd_data + 4;
1248 		uint16_t page_length;
1249 		uint16_t num_supported_pages;
1250 		uint16_t i;
1251 
1252 		page_length = from_be16(vpd_data + 2);
1253 		num_supported_pages = spdk_min(page_length, base->iov.iov_len - 4);
1254 
1255 		for (i = 0; i < num_supported_pages; i++) {
1256 			if (supported_vpd_pages[i] == SPDK_SPC_VPD_BLOCK_THIN_PROVISION) {
1257 				block_provisioning_page_supported = true;
1258 				break;
1259 			}
1260 		}
1261 	}
1262 
1263 	if (block_provisioning_page_supported) {
1264 		return send_inquiry_vpd(base, SPDK_SPC_VPD_BLOCK_THIN_PROVISION);
1265 	} else {
1266 		return send_read_cap_10(base);
1267 	}
1268 }
1269 
1270 static int
1271 process_scan_inquiry_vpd_block_thin_provision(struct virtio_scsi_scan_base *base)
1272 {
1273 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1274 
1275 	base->info.unmap_supported = false;
1276 
1277 	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
1278 		uint8_t *vpd_data = base->payload;
1279 
1280 		base->info.unmap_supported = !!(vpd_data[5] & SPDK_SCSI_UNMAP_LBPU);
1281 	}
1282 
1283 	SPDK_INFOLOG(SPDK_LOG_VIRTIO, "Target %u: unmap supported = %d\n",
1284 		     base->info.target, (int)base->info.unmap_supported);
1285 
1286 	return send_read_cap_10(base);
1287 }
1288 
1289 static int
1290 process_scan_inquiry(struct virtio_scsi_scan_base *base)
1291 {
1292 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1293 	struct spdk_scsi_cdb_inquiry *inquiry_cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;
1294 
1295 	if ((inquiry_cdb->evpd & 1) == 0) {
1296 		return process_scan_inquiry_standard(base);
1297 	}
1298 
1299 	switch (inquiry_cdb->page_code) {
1300 	case SPDK_SPC_VPD_SUPPORTED_VPD_PAGES:
1301 		return process_scan_inquiry_vpd_supported_vpd_pages(base);
1302 	case SPDK_SPC_VPD_BLOCK_THIN_PROVISION:
1303 		return process_scan_inquiry_vpd_block_thin_provision(base);
1304 	default:
1305 		SPDK_DEBUGLOG(SPDK_LOG_VIRTIO, "Unexpected VPD page 0x%02x\n", inquiry_cdb->page_code);
1306 		return -1;
1307 	}
1308 }
1309 
1310 static void
1311 bdev_virtio_disc_notify_remove(void *remove_ctx)
1312 {
1313 	struct virtio_scsi_disk *disk = remove_ctx;
1314 
1315 	disk->removed = true;
1316 	spdk_bdev_close(disk->notify_desc);
1317 }
1318 
1319 /* To be called only from the thread performing target scan */
1320 static int
1321 virtio_scsi_dev_add_tgt(struct virtio_scsi_dev *svdev, struct virtio_scsi_scan_info *info)
1322 {
1323 	struct virtio_scsi_disk *disk;
1324 	struct spdk_bdev *bdev;
1325 	int rc;
1326 
1327 	TAILQ_FOREACH(disk, &svdev->luns, link) {
1328 		if (disk->info.target == info->target) {
			/* Target is already attached and parameter changes are not supported. */
1330 			return 0;
1331 		}
1332 	}
1333 
1334 	if (info->block_size == 0 || info->num_blocks == 0) {
1335 		SPDK_ERRLOG("%s: invalid target %u: bs=%"PRIu32" blocks=%"PRIu64"\n",
1336 			    svdev->vdev.name, info->target, info->block_size, info->num_blocks);
1337 		return -EINVAL;
1338 	}
1339 
1340 	disk = calloc(1, sizeof(*disk));
1341 	if (disk == NULL) {
1342 		SPDK_ERRLOG("could not allocate disk\n");
1343 		return -ENOMEM;
1344 	}
1345 
1346 	disk->svdev = svdev;
1347 	memcpy(&disk->info, info, sizeof(*info));
1348 
1349 	bdev = &disk->bdev;
1350 	bdev->name = spdk_sprintf_alloc("%st%"PRIu8, svdev->vdev.name, info->target);
1351 	if (bdev->name == NULL) {
1352 		SPDK_ERRLOG("Couldn't alloc memory for the bdev name.\n");
1353 		free(disk);
1354 		return -ENOMEM;
1355 	}
1356 
1357 	bdev->product_name = "Virtio SCSI Disk";
1358 	bdev->write_cache = 0;
1359 	bdev->blocklen = disk->info.block_size;
1360 	bdev->blockcnt = disk->info.num_blocks;
1361 
1362 	bdev->ctxt = disk;
1363 	bdev->fn_table = &virtio_fn_table;
1364 	bdev->module = &virtio_scsi_if;
1365 
1366 	rc = spdk_bdev_register(&disk->bdev);
1367 	if (rc) {
1368 		SPDK_ERRLOG("Failed to register bdev name=%s\n", disk->bdev.name);
1369 		free(bdev->name);
1370 		free(disk);
1371 		return rc;
1372 	}
1373 
1374 	rc = spdk_bdev_open(bdev, false, bdev_virtio_disc_notify_remove, disk, &disk->notify_desc);
1375 	if (rc) {
1376 		assert(false);
1377 	}
1378 
1379 	TAILQ_INSERT_TAIL(&svdev->luns, disk, link);
1380 	return 0;
1381 }
1382 
1383 static int
1384 process_read_cap_10(struct virtio_scsi_scan_base *base)
1385 {
1386 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1387 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1388 	uint64_t max_block;
1389 	uint32_t block_size;
1390 	uint8_t target_id = req->lun[1];
1391 	int rc;
1392 
1393 	if (resp->response != VIRTIO_SCSI_S_OK || resp->status != SPDK_SCSI_STATUS_GOOD) {
1394 		SPDK_ERRLOG("READ CAPACITY (10) failed for target %"PRIu8".\n", target_id);
1395 		return -1;
1396 	}
1397 
1398 	block_size = from_be32(base->payload + 4);
1399 	max_block = from_be32(base->payload);
1400 
1401 	if (max_block == 0xffffffff) {
1402 		return send_read_cap_16(base);
1403 	}
1404 
1405 	base->info.num_blocks = (uint64_t)max_block + 1;
1406 	base->info.block_size = block_size;
1407 
1408 	rc = virtio_scsi_dev_add_tgt(base->svdev, &base->info);
1409 	if (rc != 0) {
1410 		return rc;
1411 	}
1412 
1413 	return _virtio_scsi_dev_scan_next(base, 0);
1414 }
1415 
1416 static int
1417 process_read_cap_16(struct virtio_scsi_scan_base *base)
1418 {
1419 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1420 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1421 	uint8_t target_id = req->lun[1];
1422 	int rc;
1423 
1424 	if (resp->response != VIRTIO_SCSI_S_OK || resp->status != SPDK_SCSI_STATUS_GOOD) {
1425 		SPDK_ERRLOG("READ CAPACITY (16) failed for target %"PRIu8".\n", target_id);
1426 		return -1;
1427 	}
1428 
1429 	base->info.num_blocks = from_be64(base->payload) + 1;
1430 	base->info.block_size = from_be32(base->payload + 8);
1431 	rc = virtio_scsi_dev_add_tgt(base->svdev, &base->info);
1432 	if (rc != 0) {
1433 		return rc;
1434 	}
1435 
1436 	return _virtio_scsi_dev_scan_next(base, 0);
1437 }
1438 
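/*
 * Dispatch a scan I/O completion to the handler matching the CDB that was
 * sent, retrying a limited number of times on transient errors and moving
 * on to the next target when the current one is absent or unusable.
 */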
1439 static void
1440 process_scan_resp(struct virtio_scsi_scan_base *base)
1441 {
1442 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1443 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1444 	int rc, sk, asc, ascq;
1445 	uint8_t target_id;
1446 
1447 	if (base->io_ctx.iov_req.iov_len < sizeof(struct virtio_scsi_cmd_req) ||
1448 	    base->io_ctx.iov_resp.iov_len < sizeof(struct virtio_scsi_cmd_resp)) {
1449 		SPDK_ERRLOG("Received target scan message with invalid length.\n");
1450 		_virtio_scsi_dev_scan_next(base, -EIO);
1451 		return;
1452 	}
1453 
1454 	get_scsi_status(resp, &sk, &asc, &ascq);
1455 	target_id = req->lun[1];
1456 
1457 	if (resp->response == VIRTIO_SCSI_S_BAD_TARGET ||
1458 	    resp->response == VIRTIO_SCSI_S_INCORRECT_LUN) {
1459 		_virtio_scsi_dev_scan_next(base, -ENODEV);
1460 		return;
1461 	}
1462 
1463 	if (resp->response != VIRTIO_SCSI_S_OK ||
1464 	    (resp->status == SPDK_SCSI_STATUS_CHECK_CONDITION &&
1465 	     sk != SPDK_SCSI_SENSE_ILLEGAL_REQUEST)) {
1466 		assert(base->retries > 0);
1467 		base->retries--;
1468 		if (base->retries == 0) {
1469 			SPDK_NOTICELOG("Target %"PRIu8" is present, but unavailable.\n", target_id);
1470 			SPDK_LOGDUMP(SPDK_LOG_VIRTIO, "CDB", req->cdb, sizeof(req->cdb));
1471 			SPDK_LOGDUMP(SPDK_LOG_VIRTIO, "SENSE DATA", resp->sense, sizeof(resp->sense));
1472 			_virtio_scsi_dev_scan_next(base, -EBUSY);
1473 			return;
1474 		}
1475 
1476 		/* resend the same request */
1477 		rc = send_scan_io(base);
1478 		if (rc != 0) {
1479 			/* Let response poller do the resend */
1480 		}
1481 		return;
1482 	}
1483 
1484 	base->retries = SCAN_REQUEST_RETRIES;
1485 
1486 	switch (req->cdb[0]) {
1487 	case SPDK_SPC_INQUIRY:
1488 		rc = process_scan_inquiry(base);
1489 		break;
1490 	case SPDK_SPC_TEST_UNIT_READY:
1491 		rc = process_scan_test_unit_ready(base);
1492 		break;
1493 	case SPDK_SBC_START_STOP_UNIT:
1494 		rc = process_scan_start_stop_unit(base);
1495 		break;
1496 	case SPDK_SBC_READ_CAPACITY_10:
1497 		rc = process_read_cap_10(base);
1498 		break;
1499 	case SPDK_SPC_SERVICE_ACTION_IN_16:
1500 		rc = process_read_cap_16(base);
1501 		break;
1502 	default:
1503 		SPDK_ERRLOG("Received invalid target scan message: cdb[0] = %"PRIu8".\n", req->cdb[0]);
1504 		rc = -1;
1505 		break;
1506 	}
1507 
1508 	if (rc != 0) {
1509 		if (base->needs_resend) {
1510 			return; /* Let response poller do the resend */
1511 		}
1512 
1513 		_virtio_scsi_dev_scan_next(base, rc);
1514 	}
1515 }
1516 
1517 static int
1518 _virtio_scsi_dev_scan_next(struct virtio_scsi_scan_base *base, int rc)
1519 {
1520 	struct virtio_scsi_scan_info *next;
1521 	struct virtio_scsi_disk *disk;
1522 	uint8_t target_id;
1523 
1524 	if (base->full_scan) {
1525 		if (rc != 0) {
1526 			disk = virtio_scsi_dev_get_disk_by_id(base->svdev,
1527 							      base->info.target);
1528 			if (disk != NULL) {
1529 				spdk_bdev_unregister(&disk->bdev, NULL, NULL);
1530 			}
1531 		}
1532 
1533 		target_id = base->info.target + 1;
1534 		if (target_id < BDEV_VIRTIO_MAX_TARGET) {
1535 			_virtio_scsi_dev_scan_tgt(base, target_id);
1536 			return 0;
1537 		}
1538 
1539 		base->full_scan = false;
1540 	}
1541 
1542 	next = TAILQ_FIRST(&base->scan_queue);
1543 	if (next == NULL) {
1544 		_virtio_scsi_dev_scan_finish(base, 0);
1545 		return 0;
1546 	}
1547 
1548 	TAILQ_REMOVE(&base->scan_queue, next, tailq);
1549 	target_id = next->target;
1550 	free(next);
1551 
1552 	_virtio_scsi_dev_scan_tgt(base, target_id);
1553 	return 0;
1554 }
1555 
1556 static int
1557 virtio_pci_scsi_dev_enumerate_cb(struct virtio_pci_ctx *pci_ctx, void *ctx)
1558 {
1559 	struct virtio_scsi_dev *svdev;
1560 
1561 	svdev = virtio_pci_scsi_dev_create(NULL, pci_ctx);
1562 	return svdev == NULL ? -1 : 0;
1563 }
1564 
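/*
 * Legacy INI-style config parsing: create virtio-user devices from
 * [VirtioUserN] sections and, when the [VirtioPci] section enables it,
 * enumerate virtio-scsi PCI devices.
 */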
1565 static int
1566 bdev_virtio_process_config(void)
1567 {
1568 	struct spdk_conf_section *sp;
1569 	struct virtio_scsi_dev *svdev;
1570 	char *default_name = NULL;
1571 	char *path, *type, *name;
1572 	unsigned vdev_num;
1573 	int num_queues;
1574 	bool enable_pci;
1575 	int rc = 0;
1576 
1577 	for (sp = spdk_conf_first_section(NULL); sp != NULL; sp = spdk_conf_next_section(sp)) {
1578 		if (!spdk_conf_section_match_prefix(sp, "VirtioUser")) {
1579 			continue;
1580 		}
1581 
1582 		if (sscanf(spdk_conf_section_get_name(sp), "VirtioUser%u", &vdev_num) != 1) {
1583 			SPDK_ERRLOG("Section '%s' has non-numeric suffix.\n",
1584 				    spdk_conf_section_get_name(sp));
1585 			rc = -1;
1586 			goto out;
1587 		}
1588 
1589 		path = spdk_conf_section_get_val(sp, "Path");
1590 		if (path == NULL) {
1591 			SPDK_ERRLOG("VirtioUser%u: missing Path\n", vdev_num);
1592 			rc = -1;
1593 			goto out;
1594 		}
1595 
1596 		type = spdk_conf_section_get_val(sp, "Type");
1597 		if (type != NULL && strcmp(type, "SCSI") != 0) {
1598 			continue;
1599 		}
1600 
1601 		num_queues = spdk_conf_section_get_intval(sp, "Queues");
1602 		if (num_queues < 1) {
1603 			num_queues = 1;
1604 		} else if (num_queues > SPDK_VIRTIO_MAX_VIRTQUEUES) {
1605 			num_queues = SPDK_VIRTIO_MAX_VIRTQUEUES;
1606 		}
1607 
1608 		name = spdk_conf_section_get_val(sp, "Name");
1609 		if (name == NULL) {
1610 			default_name = spdk_sprintf_alloc("VirtioScsi%u", vdev_num);
1611 			name = default_name;
1612 		}
1613 
1614 		svdev = virtio_user_scsi_dev_create(name, path, num_queues, 512);
1615 		free(default_name);
1616 		default_name = NULL;
1617 
1618 		if (svdev == NULL) {
1619 			rc = -1;
1620 			goto out;
1621 		}
1622 	}
1623 
1624 	sp = spdk_conf_find_section(NULL, "VirtioPci");
1625 	if (sp == NULL) {
1626 		return 0;
1627 	}
1628 
1629 	enable_pci = spdk_conf_section_get_boolval(sp, "Enable", false);
1630 	if (enable_pci) {
1631 		rc = virtio_pci_dev_enumerate(virtio_pci_scsi_dev_enumerate_cb, NULL,
1632 					      PCI_DEVICE_ID_VIRTIO_SCSI_MODERN);
1633 	}
1634 
1635 out:
1636 	return rc;
1637 }
1638 
1639 static int
1640 _virtio_scsi_dev_scan_init(struct virtio_scsi_dev *svdev)
1641 {
1642 	struct virtio_scsi_scan_base *base;
1643 	struct spdk_io_channel *io_ch;
1644 	struct virtio_scsi_io_ctx *io_ctx;
1645 	struct virtio_scsi_cmd_req *req;
1646 	struct virtio_scsi_cmd_resp *resp;
1647 
1648 	io_ch = spdk_get_io_channel(svdev);
1649 	if (io_ch == NULL) {
1650 		return -EBUSY;
1651 	}
1652 
1653 	base = spdk_zmalloc(sizeof(*base), 64, NULL,
1654 			    SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (base == NULL) {
		SPDK_ERRLOG("couldn't allocate memory for the SCSI target scan.\n");
		spdk_put_io_channel(io_ch);
		return -ENOMEM;
	}
1659 
1660 	base->svdev = svdev;
1661 
1662 	base->channel = spdk_io_channel_get_ctx(io_ch);
1663 	TAILQ_INIT(&base->scan_queue);
1664 	svdev->scan_ctx = base;
1665 
1666 	base->iov.iov_base = base->payload;
1667 	io_ctx = &base->io_ctx;
1668 	req = &io_ctx->req;
1669 	resp = &io_ctx->resp;
1670 	io_ctx->iov_req.iov_base = req;
1671 	io_ctx->iov_req.iov_len = sizeof(*req);
1672 	io_ctx->iov_resp.iov_base = resp;
1673 	io_ctx->iov_resp.iov_len = sizeof(*resp);
1674 
1675 	base->retries = SCAN_REQUEST_RETRIES;
1676 	return 0;
1677 }
1678 
1679 static void
1680 _virtio_scsi_dev_scan_tgt(struct virtio_scsi_scan_base *base, uint8_t target)
1681 {
1682 	int rc;
1683 
1684 	memset(&base->info, 0, sizeof(base->info));
1685 	base->info.target = target;
1686 
1687 	rc = send_inquiry(base);
1688 	if (rc) {
1689 		/* Let response poller do the resend */
1690 	}
1691 }
1692 
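/*
 * Start a full scan of all targets on the device. If a single-target scan
 * is already in progress it is turned into a full rescan and any queued
 * per-target scans are dropped; if a full scan is already running, -EEXIST
 * is returned.
 */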
1693 static int
1694 virtio_scsi_dev_scan(struct virtio_scsi_dev *svdev, bdev_virtio_create_cb cb_fn,
1695 		     void *cb_arg)
1696 {
1697 	struct virtio_scsi_scan_base *base;
1698 	struct virtio_scsi_scan_info *tgt, *next_tgt;
1699 	int rc;
1700 
1701 	if (svdev->scan_ctx) {
1702 		if (svdev->scan_ctx->full_scan) {
1703 			return -EEXIST;
1704 		}
1705 
1706 		/* We're about to start a full rescan, so there's no need
1707 		 * to scan particular targets afterwards.
1708 		 */
1709 		TAILQ_FOREACH_SAFE(tgt, &svdev->scan_ctx->scan_queue, tailq, next_tgt) {
1710 			TAILQ_REMOVE(&svdev->scan_ctx->scan_queue, tgt, tailq);
1711 			free(tgt);
1712 		}
1713 
1714 		svdev->scan_ctx->cb_fn = cb_fn;
1715 		svdev->scan_ctx->cb_arg = cb_arg;
1716 		svdev->scan_ctx->restart = true;
1717 		return 0;
1718 	}
1719 
1720 	rc = _virtio_scsi_dev_scan_init(svdev);
1721 	if (rc != 0) {
1722 		return rc;
1723 	}
1724 
1725 	base = svdev->scan_ctx;
1726 	base->cb_fn = cb_fn;
1727 	base->cb_arg = cb_arg;
1728 	base->full_scan = true;
1729 
1730 	_virtio_scsi_dev_scan_tgt(base, 0);
1731 	return 0;
1732 }
1733 
1734 static int
1735 virtio_scsi_dev_scan_tgt(struct virtio_scsi_dev *svdev, uint8_t target)
1736 {
1737 	struct virtio_scsi_scan_base *base;
1738 	struct virtio_scsi_scan_info *info;
1739 	int rc;
1740 
1741 	base = svdev->scan_ctx;
1742 	if (base) {
1743 		info = calloc(1, sizeof(*info));
1744 		if (info == NULL) {
1745 			SPDK_ERRLOG("calloc failed\n");
1746 			return -ENOMEM;
1747 		}
1748 
1749 		info->target = target;
1750 		TAILQ_INSERT_TAIL(&base->scan_queue, info, tailq);
1751 		return 0;
1752 	}
1753 
1754 	rc = _virtio_scsi_dev_scan_init(svdev);
1755 	if (rc != 0) {
1756 		return rc;
1757 	}
1758 
1759 	base = svdev->scan_ctx;
1760 	base->full_scan = true;
1761 	_virtio_scsi_dev_scan_tgt(base, target);
1762 	return 0;
1763 }
1764 
1765 static void
1766 bdev_virtio_initial_scan_complete(void *ctx, int result,
1767 				  struct spdk_bdev **bdevs, size_t bdevs_cnt)
1768 {
1769 	struct virtio_scsi_dev *svdev;
1770 
1771 	pthread_mutex_lock(&g_virtio_scsi_mutex);
1772 	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
1773 		if (svdev->scan_ctx) {
1774 			/* another device is still being scanned */
1775 			pthread_mutex_unlock(&g_virtio_scsi_mutex);
1776 			return;
1777 		}
1778 	}
1779 
1780 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
1781 	spdk_bdev_module_init_done(&virtio_scsi_if);
1782 }
1783 
1784 static int
1785 bdev_virtio_initialize(void)
1786 {
1787 	struct virtio_scsi_dev *svdev, *next_svdev;
1788 	int rc;
1789 
1790 	rc = bdev_virtio_process_config();
1791 	pthread_mutex_lock(&g_virtio_scsi_mutex);
1792 
1793 	if (rc != 0) {
1794 		goto err_unlock;
1795 	}
1796 
1797 	if (TAILQ_EMPTY(&g_virtio_scsi_devs)) {
1798 		goto out_unlock;
1799 	}
1800 
1801 	/* Initialize all created devices and scan available targets */
1802 	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
1803 		rc = virtio_scsi_dev_scan(svdev, bdev_virtio_initial_scan_complete, NULL);
1804 		if (rc != 0) {
1805 			goto err_unlock;
1806 		}
1807 	}
1808 
1809 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
1810 	return 0;
1811 
1812 err_unlock:
1813 	/* Remove any created devices */
1814 	TAILQ_FOREACH_SAFE(svdev, &g_virtio_scsi_devs, tailq, next_svdev) {
1815 		virtio_scsi_dev_remove(svdev, NULL, NULL);
1816 	}
1817 
1818 out_unlock:
1819 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
1820 	spdk_bdev_module_init_done(&virtio_scsi_if);
1821 	return rc;
1822 }
1823 
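/*
 * Final teardown, executed on the thread owning the controlq: free the
 * controlq send ring, release the management queues, stop and destruct the
 * virtio device, remove it from the global list and invoke the caller's
 * remove callback.
 */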
1824 static void
1825 _virtio_scsi_dev_unregister_cb(void *io_device)
1826 {
1827 	struct virtio_scsi_dev *svdev = io_device;
1828 	struct virtio_dev *vdev = &svdev->vdev;
1829 	bool finish_module;
1830 	bdev_virtio_remove_cb remove_cb;
1831 	void *remove_ctx;
1832 
1833 	assert(spdk_ring_count(svdev->ctrlq_ring) == 0);
1834 	spdk_ring_free(svdev->ctrlq_ring);
1835 	spdk_poller_unregister(&svdev->mgmt_poller);
1836 
1837 	virtio_dev_release_queue(vdev, VIRTIO_SCSI_EVENTQ);
1838 	virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
1839 
1840 	virtio_dev_stop(vdev);
1841 	virtio_dev_destruct(vdev);
1842 
1843 	pthread_mutex_lock(&g_virtio_scsi_mutex);
1844 	TAILQ_REMOVE(&g_virtio_scsi_devs, svdev, tailq);
1845 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
1846 
1847 	remove_cb = svdev->remove_cb;
1848 	remove_ctx = svdev->remove_ctx;
1849 	spdk_free(svdev->eventq_ios);
1850 	free(svdev);
1851 
1852 	if (remove_cb) {
1853 		remove_cb(remove_ctx, 0);
1854 	}
1855 
1856 	finish_module = TAILQ_EMPTY(&g_virtio_scsi_devs);
1857 
1858 	if (g_bdev_virtio_finish && finish_module) {
1859 		spdk_bdev_module_finish_done();
1860 	}
1861 }
1862 
1863 static void
1864 virtio_scsi_dev_unregister_cb(void *io_device)
1865 {
1866 	struct virtio_scsi_dev *svdev = io_device;
1867 	struct spdk_thread *thread;
1868 
1869 	thread = virtio_dev_queue_get_thread(&svdev->vdev, VIRTIO_SCSI_CONTROLQ);
1870 	spdk_thread_send_msg(thread, _virtio_scsi_dev_unregister_cb, io_device);
1871 }
1872 
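/*
 * Mark the device as removed and unregister all of its bdevs. The actual
 * teardown runs once the last disk is destructed (or immediately if the
 * device has none); if a scan is in flight, removal is deferred until its
 * pending I/O completes.
 */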
1873 static void
1874 virtio_scsi_dev_remove(struct virtio_scsi_dev *svdev,
1875 		       bdev_virtio_remove_cb cb_fn, void *cb_arg)
1876 {
1877 	struct virtio_scsi_disk *disk, *disk_tmp;
1878 	bool do_remove = true;
1879 
1880 	if (svdev->removed) {
1881 		if (cb_fn) {
1882 			cb_fn(cb_arg, -EBUSY);
1883 		}
1884 		return;
1885 	}
1886 
1887 	svdev->remove_cb = cb_fn;
1888 	svdev->remove_ctx = cb_arg;
1889 	svdev->removed = true;
1890 
1891 	if (svdev->scan_ctx) {
1892 		/* The removal will continue after we receive a pending scan I/O. */
1893 		return;
1894 	}
1895 
1896 	TAILQ_FOREACH_SAFE(disk, &svdev->luns, link, disk_tmp) {
1897 		if (!disk->removed) {
1898 			spdk_bdev_unregister(&disk->bdev, NULL, NULL);
1899 		}
1900 		do_remove = false;
1901 	}
1902 
1903 	if (do_remove) {
1904 		spdk_io_device_unregister(svdev, virtio_scsi_dev_unregister_cb);
1905 	}
1906 }
1907 
1908 static void
1909 bdev_virtio_finish(void)
1910 {
1911 	struct virtio_scsi_dev *svdev, *next;
1912 
1913 	g_bdev_virtio_finish = true;
1914 
1915 	pthread_mutex_lock(&g_virtio_scsi_mutex);
1916 	if (TAILQ_EMPTY(&g_virtio_scsi_devs)) {
1917 		pthread_mutex_unlock(&g_virtio_scsi_mutex);
1918 		spdk_bdev_module_finish_done();
1919 		return;
1920 	}
1921 
1922 	/* Defer module finish until all controllers are removed. */
1923 	TAILQ_FOREACH_SAFE(svdev, &g_virtio_scsi_devs, tailq, next) {
1924 		virtio_scsi_dev_remove(svdev, NULL, NULL);
1925 	}
1926 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
1927 }
1928 
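/*
 * Public API: attach a vhost-user virtio-scsi controller and scan its
 * targets; cb_fn is invoked with the discovered bdevs once the scan is done.
 * A minimal usage sketch (the socket path and callback name are illustrative,
 * not part of this file):
 *
 *   bdev_virtio_user_scsi_dev_create("VirtioScsi0", "/tmp/vhost.0",
 *                                    1, 512, scan_done_cb, NULL);
 */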
1929 int
1930 bdev_virtio_user_scsi_dev_create(const char *base_name, const char *path,
1931 				 unsigned num_queues, unsigned queue_size,
1932 				 bdev_virtio_create_cb cb_fn, void *cb_arg)
1933 {
1934 	struct virtio_scsi_dev *svdev;
1935 	int rc;
1936 
1937 	svdev = virtio_user_scsi_dev_create(base_name, path, num_queues, queue_size);
1938 	if (svdev == NULL) {
1939 		return -1;
1940 	}
1941 
1942 	rc = virtio_scsi_dev_scan(svdev, cb_fn, cb_arg);
1943 	if (rc) {
1944 		virtio_scsi_dev_remove(svdev, NULL, NULL);
1945 	}
1946 
1947 	return rc;
1948 }
1949 
1950 struct bdev_virtio_pci_dev_create_ctx {
1951 	const char *name;
1952 	bdev_virtio_create_cb cb_fn;
1953 	void *cb_arg;
1954 };
1955 
1956 static int
1957 bdev_virtio_pci_scsi_dev_create_cb(struct virtio_pci_ctx *pci_ctx, void *ctx)
1958 {
1959 	struct virtio_scsi_dev *svdev;
1960 	struct bdev_virtio_pci_dev_create_ctx *create_ctx = ctx;
1961 	int rc;
1962 
1963 	svdev = virtio_pci_scsi_dev_create(create_ctx->name, pci_ctx);
1964 	if (svdev == NULL) {
1965 		return -1;
1966 	}
1967 
1968 	rc = virtio_scsi_dev_scan(svdev, create_ctx->cb_fn, create_ctx->cb_arg);
1969 	if (rc) {
1970 		virtio_scsi_dev_remove(svdev, NULL, NULL);
1971 	}
1972 
1973 	return rc;
1974 }
1975 
1976 int
1977 bdev_virtio_pci_scsi_dev_create(const char *name, struct spdk_pci_addr *pci_addr,
1978 				bdev_virtio_create_cb cb_fn, void *cb_arg)
1979 {
1980 	struct bdev_virtio_pci_dev_create_ctx create_ctx;
1981 
1982 	create_ctx.name = name;
1983 	create_ctx.cb_fn = cb_fn;
1984 	create_ctx.cb_arg = cb_arg;
1985 
1986 	return virtio_pci_dev_attach(bdev_virtio_pci_scsi_dev_create_cb, &create_ctx,
1987 				     PCI_DEVICE_ID_VIRTIO_SCSI_MODERN, pci_addr);
1988 }
1989 
1990 int
1991 bdev_virtio_scsi_dev_remove(const char *name, bdev_virtio_remove_cb cb_fn, void *cb_arg)
1992 {
1993 	struct virtio_scsi_dev *svdev;
1994 
1995 	pthread_mutex_lock(&g_virtio_scsi_mutex);
1996 	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
1997 		if (strcmp(svdev->vdev.name, name) == 0) {
1998 			break;
1999 		}
2000 	}
2001 
2002 	if (svdev == NULL) {
2003 		pthread_mutex_unlock(&g_virtio_scsi_mutex);
2004 		SPDK_ERRLOG("Cannot find Virtio-SCSI device named '%s'\n", name);
2005 		return -ENODEV;
2006 	}
2007 
2008 	virtio_scsi_dev_remove(svdev, cb_fn, cb_arg);
2009 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
2010 
2011 	return 0;
2012 }
2013 
2014 void
2015 bdev_virtio_scsi_dev_list(struct spdk_json_write_ctx *w)
2016 {
2017 	struct virtio_scsi_dev *svdev;
2018 
2019 	spdk_json_write_array_begin(w);
2020 
2021 	pthread_mutex_lock(&g_virtio_scsi_mutex);
2022 	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
2023 		spdk_json_write_object_begin(w);
2024 
2025 		spdk_json_write_named_string(w, "name", svdev->vdev.name);
2026 
2027 		virtio_dev_dump_json_info(&svdev->vdev, w);
2028 
2029 		spdk_json_write_object_end(w);
2030 	}
2031 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
2032 
2033 	spdk_json_write_array_end(w);
2034 }
2035 
2036 SPDK_LOG_REGISTER_COMPONENT("virtio", SPDK_LOG_VIRTIO)
2037