xref: /spdk/module/bdev/virtio/bdev_virtio_scsi.c (revision 0098e636761237b77c12c30c2408263a5d2260cc)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 
8 #include "spdk/bdev.h"
9 #include "spdk/endian.h"
10 #include "spdk/env.h"
11 #include "spdk/thread.h"
12 #include "spdk/scsi_spec.h"
13 #include "spdk/string.h"
14 #include "spdk/util.h"
15 #include "spdk/json.h"
16 
17 #include "spdk/bdev_module.h"
18 #include "spdk/log.h"
19 #include "spdk_internal/virtio.h"
20 #include "spdk_internal/vhost_user.h"
21 
22 #include <linux/virtio_scsi.h>
23 #include <linux/virtio_ids.h>
24 
25 #include "bdev_virtio.h"
26 
27 #define BDEV_VIRTIO_MAX_TARGET 64
28 #define BDEV_VIRTIO_SCAN_PAYLOAD_SIZE 256
29 #define MGMT_POLL_PERIOD_US (1000 * 5)
30 #define CTRLQ_RING_SIZE 16
31 #define SCAN_REQUEST_RETRIES 5
32 
33 /* Number of non-request queues - eventq and controlq */
34 #define SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED 2
35 
36 #define VIRTIO_SCSI_EVENTQ_BUFFER_COUNT 16
37 
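/*
 * Fixed virtqueue layout defined by the virtio-scsi spec: the controlq is
 * virtqueue 0, the eventq is virtqueue 1, and request queues start at index 2.
 * Each bdev I/O channel acquires one request queue for its exclusive use
 * (see bdev_virtio_scsi_ch_create_cb()).
 */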
38 #define VIRTIO_SCSI_CONTROLQ	0
39 #define VIRTIO_SCSI_EVENTQ	1
40 #define VIRTIO_SCSI_REQUESTQ	2
41 
42 static int bdev_virtio_initialize(void);
43 static void bdev_virtio_finish(void);
44 
45 struct virtio_scsi_dev {
46 	/* Generic virtio device data. */
47 	struct virtio_dev		vdev;
48 
49 	/** Detected SCSI LUNs */
50 	TAILQ_HEAD(, virtio_scsi_disk)	luns;
51 
52 	/** Context for the SCSI target scan. */
53 	struct virtio_scsi_scan_base	*scan_ctx;
54 
55 	/** Controlq poller. */
56 	struct spdk_poller		*mgmt_poller;
57 
58 	/** Controlq messages to be sent. */
59 	struct spdk_ring		*ctrlq_ring;
60 
61 	/** Buffers for the eventq. */
62 	struct virtio_scsi_eventq_io	*eventq_ios;
63 
64 	/** Device marked for removal. */
65 	bool				removed;
66 
67 	/** Callback to be called after vdev removal. */
68 	bdev_virtio_remove_cb		remove_cb;
69 
70 	/** Context for the `remove_cb`. */
71 	void				*remove_ctx;
72 
73 	TAILQ_ENTRY(virtio_scsi_dev) tailq;
74 };
75 
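/*
 * Per-I/O context carved out of the spdk_bdev_io driver context. It holds the
 * virtio-scsi request/response headers together with the iovecs describing
 * them to the virtqueue. The unions cover both the regular command path
 * (req/resp) and the task-management path used for LUN reset
 * (tmf_req/tmf_resp).
 */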
76 struct virtio_scsi_io_ctx {
77 	struct iovec			iov_req;
78 	struct iovec			iov_resp;
79 	union {
80 		struct virtio_scsi_cmd_req req;
81 		struct virtio_scsi_ctrl_tmf_req tmf_req;
82 	};
83 	union {
84 		struct virtio_scsi_cmd_resp resp;
85 		struct virtio_scsi_ctrl_tmf_resp tmf_resp;
86 	};
87 };
88 
89 struct virtio_scsi_eventq_io {
90 	struct iovec			iov;
91 	struct virtio_scsi_event	ev;
92 };
93 
94 struct virtio_scsi_scan_info {
95 	uint64_t			num_blocks;
96 	uint32_t			block_size;
97 	uint8_t				target;
98 	bool				unmap_supported;
99 	TAILQ_ENTRY(virtio_scsi_scan_info) tailq;
100 };
101 
102 struct virtio_scsi_scan_base {
103 	struct virtio_scsi_dev		*svdev;
104 
105 	/** I/O channel used for the scan I/O. */
106 	struct bdev_virtio_io_channel	*channel;
107 
108 	bdev_virtio_create_cb		cb_fn;
109 	void				*cb_arg;
110 
111 	/** Scan all targets on the device. */
112 	bool				full_scan;
113 
114 	/** Start a full rescan after receiving next scan I/O response. */
115 	bool				restart;
116 
117 	/** Additional targets to be (re)scanned. */
118 	TAILQ_HEAD(, virtio_scsi_scan_info) scan_queue;
119 
120 	/** Remaining attempts for sending the current request. */
121 	unsigned                        retries;
122 
123 	/** If set, the last scan I/O needs to be resent */
124 	bool				needs_resend;
125 
126 	struct virtio_scsi_io_ctx	io_ctx;
127 	struct iovec			iov;
128 	uint8_t				payload[BDEV_VIRTIO_SCAN_PAYLOAD_SIZE];
129 
130 	/** Scan results for the current target. */
131 	struct virtio_scsi_scan_info	info;
132 };
133 
134 struct virtio_scsi_disk {
135 	struct spdk_bdev		bdev;
136 	struct virtio_scsi_dev		*svdev;
137 	struct virtio_scsi_scan_info	info;
138 
139 	/** Descriptor opened just to be notified of external bdev hotremove. */
140 	struct spdk_bdev_desc		*notify_desc;
141 
142 	/** Disk marked for removal. */
143 	bool				removed;
144 	TAILQ_ENTRY(virtio_scsi_disk)	link;
145 };
146 
147 struct bdev_virtio_io_channel {
148 	struct virtio_scsi_dev	*svdev;
149 
150 	/** Virtqueue exclusively assigned to this channel. */
151 	struct virtqueue	*vq;
152 
153 	/** Virtio response poller. */
154 	struct spdk_poller	*poller;
155 };
156 
157 static TAILQ_HEAD(, virtio_scsi_dev) g_virtio_scsi_devs =
158 	TAILQ_HEAD_INITIALIZER(g_virtio_scsi_devs);
159 
160 static pthread_mutex_t g_virtio_scsi_mutex = PTHREAD_MUTEX_INITIALIZER;
161 
162 /** Module finish in progress */
163 static bool g_bdev_virtio_finish = false;
164 
165 /* Features desired/implemented by this driver. */
166 #define VIRTIO_SCSI_DEV_SUPPORTED_FEATURES		\
167 	(1ULL << VIRTIO_SCSI_F_INOUT		|	\
168 	 1ULL << VIRTIO_SCSI_F_HOTPLUG		|	\
169 	 1ULL << VIRTIO_RING_F_EVENT_IDX	|	\
170 	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)
171 
172 static void virtio_scsi_dev_unregister_cb(void *io_device);
173 static void virtio_scsi_dev_remove(struct virtio_scsi_dev *svdev,
174 				   bdev_virtio_remove_cb cb_fn, void *cb_arg);
175 static int bdev_virtio_scsi_ch_create_cb(void *io_device, void *ctx_buf);
176 static void bdev_virtio_scsi_ch_destroy_cb(void *io_device, void *ctx_buf);
177 static void process_scan_resp(struct virtio_scsi_scan_base *base);
178 static int bdev_virtio_mgmt_poll(void *arg);
179 
180 static int
181 virtio_scsi_dev_send_eventq_io(struct virtqueue *vq, struct virtio_scsi_eventq_io *io)
182 {
183 	int rc;
184 
185 	rc = virtqueue_req_start(vq, io, 1);
186 	if (rc != 0) {
187 		return -1;
188 	}
189 
190 	virtqueue_req_add_iovs(vq, &io->iov, 1, SPDK_VIRTIO_DESC_WR);
191 	virtqueue_req_flush(vq);
192 
193 	return 0;
194 }
195 
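/*
 * Common initialization for PCI and vhost-user devices: negotiate features,
 * start the device with `max_queues` virtqueues, create the controlq send
 * ring, acquire the controlq and eventq, prime the eventq with receive
 * buffers, then register the management poller and the io_device.
 */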
196 static int
197 virtio_scsi_dev_init(struct virtio_scsi_dev *svdev, uint16_t max_queues)
198 {
199 	struct virtio_dev *vdev = &svdev->vdev;
200 	struct spdk_ring *ctrlq_ring;
201 	struct virtio_scsi_eventq_io *eventq_io;
202 	struct virtqueue *eventq;
203 	uint16_t i, num_events;
204 	int rc;
205 
206 	rc = virtio_dev_reset(vdev, VIRTIO_SCSI_DEV_SUPPORTED_FEATURES);
207 	if (rc != 0) {
208 		return rc;
209 	}
210 
211 	rc = virtio_dev_start(vdev, max_queues, SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED);
212 	if (rc != 0) {
213 		return rc;
214 	}
215 
216 	ctrlq_ring = spdk_ring_create(SPDK_RING_TYPE_MP_SC, CTRLQ_RING_SIZE,
217 				      SPDK_ENV_SOCKET_ID_ANY);
218 	if (ctrlq_ring == NULL) {
219 		SPDK_ERRLOG("Failed to allocate send ring for the controlq.\n");
220 		return -1;
221 	}
222 
223 	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_CONTROLQ);
224 	if (rc != 0) {
225 		SPDK_ERRLOG("Failed to acquire the controlq.\n");
226 		spdk_ring_free(ctrlq_ring);
227 		return -1;
228 	}
229 
230 	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_EVENTQ);
231 	if (rc != 0) {
232 		SPDK_ERRLOG("Failed to acquire the eventq.\n");
233 		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
234 		spdk_ring_free(ctrlq_ring);
235 		return -1;
236 	}
237 
238 	eventq = vdev->vqs[VIRTIO_SCSI_EVENTQ];
239 	num_events = spdk_min(eventq->vq_nentries, VIRTIO_SCSI_EVENTQ_BUFFER_COUNT);
240 	svdev->eventq_ios = spdk_zmalloc(sizeof(*svdev->eventq_ios) * num_events,
241 					 0, NULL, SPDK_ENV_LCORE_ID_ANY,
242 					 SPDK_MALLOC_DMA);
243 	if (svdev->eventq_ios == NULL) {
244 		SPDK_ERRLOG("cannot allocate memory for %"PRIu16" eventq buffers\n",
245 			    num_events);
246 		virtio_dev_release_queue(vdev, VIRTIO_SCSI_EVENTQ);
247 		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
248 		spdk_ring_free(ctrlq_ring);
249 		return -1;
250 	}
251 
252 	for (i = 0; i < num_events; i++) {
253 		eventq_io = &svdev->eventq_ios[i];
254 		eventq_io->iov.iov_base = &eventq_io->ev;
255 		eventq_io->iov.iov_len = sizeof(eventq_io->ev);
256 		virtio_scsi_dev_send_eventq_io(eventq, eventq_io);
257 	}
258 
259 	svdev->ctrlq_ring = ctrlq_ring;
260 
261 	svdev->mgmt_poller = SPDK_POLLER_REGISTER(bdev_virtio_mgmt_poll, svdev,
262 			     MGMT_POLL_PERIOD_US);
263 
264 	TAILQ_INIT(&svdev->luns);
265 	svdev->scan_ctx = NULL;
266 	svdev->removed = false;
267 	svdev->remove_cb = NULL;
268 	svdev->remove_ctx = NULL;
269 
270 	spdk_io_device_register(svdev, bdev_virtio_scsi_ch_create_cb,
271 				bdev_virtio_scsi_ch_destroy_cb,
272 				sizeof(struct bdev_virtio_io_channel),
273 				svdev->vdev.name);
274 
275 	pthread_mutex_lock(&g_virtio_scsi_mutex);
276 	TAILQ_INSERT_TAIL(&g_virtio_scsi_devs, svdev, tailq);
277 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
278 	return 0;
279 }
280 
281 static struct virtio_scsi_dev *
282 virtio_pci_scsi_dev_create(const char *name, struct virtio_pci_ctx *pci_ctx)
283 {
284 	static int pci_dev_counter = 0;
285 	struct virtio_scsi_dev *svdev;
286 	struct virtio_dev *vdev;
287 	char *default_name = NULL;
288 	uint32_t num_queues;
289 	int rc;
290 
291 	svdev = calloc(1, sizeof(*svdev));
292 	if (svdev == NULL) {
293 		SPDK_ERRLOG("virtio device calloc failed\n");
294 		return NULL;
295 	}
296 
297 	vdev = &svdev->vdev;
298 	if (name == NULL) {
299 		default_name = spdk_sprintf_alloc("VirtioScsi%"PRIu32, pci_dev_counter++);
300 		if (default_name == NULL) {
301 			free(svdev);
302 			return NULL;
303 		}
304 		name = default_name;
305 	}
306 
307 	rc = virtio_pci_dev_init(vdev, name, pci_ctx);
308 	free(default_name);
309 
310 	if (rc != 0) {
311 		free(svdev);
312 		return NULL;
313 	}
314 
315 	rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_scsi_config, num_queues),
316 					&num_queues, sizeof(num_queues));
317 	if (rc) {
318 		SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc));
319 		goto fail;
320 	}
321 
322 	rc = virtio_scsi_dev_init(svdev, num_queues);
323 	if (rc != 0) {
324 		goto fail;
325 	}
326 
327 	return svdev;
328 
329 fail:
330 	vdev->ctx = NULL;
331 	virtio_dev_destruct(vdev);
332 	free(svdev);
333 	return NULL;
334 }
335 
336 static struct virtio_scsi_dev *
337 virtio_user_scsi_dev_create(const char *name, const char *path,
338 			    uint16_t num_queues, uint32_t queue_size)
339 {
340 	struct virtio_scsi_dev *svdev;
341 	struct virtio_dev *vdev;
342 	int rc;
343 
344 	svdev = calloc(1, sizeof(*svdev));
345 	if (svdev == NULL) {
346 		SPDK_ERRLOG("calloc failed for virtio device %s: %s\n", name, path);
347 		return NULL;
348 	}
349 
350 	vdev = &svdev->vdev;
351 	rc = virtio_user_dev_init(vdev, name, path, queue_size);
352 	if (rc != 0) {
353 		SPDK_ERRLOG("Failed to create virtio device %s: %s\n", name, path);
354 		free(svdev);
355 		return NULL;
356 	}
357 
358 	rc = virtio_scsi_dev_init(svdev, num_queues);
359 	if (rc != 0) {
360 		virtio_dev_destruct(vdev);
361 		free(svdev);
362 		return NULL;
363 	}
364 
365 	return svdev;
366 }
367 
368 static struct virtio_scsi_disk *
369 virtio_scsi_dev_get_disk_by_id(struct virtio_scsi_dev *svdev, uint8_t target_id)
370 {
371 	struct virtio_scsi_disk *disk;
372 
373 	TAILQ_FOREACH(disk, &svdev->luns, link) {
374 		if (disk->info.target == target_id) {
375 			return disk;
376 		}
377 	}
378 
379 	return NULL;
380 }
381 
382 static int virtio_scsi_dev_scan(struct virtio_scsi_dev *svdev,
383 				bdev_virtio_create_cb cb_fn, void *cb_arg);
384 static int send_scan_io(struct virtio_scsi_scan_base *base);
385 static void _virtio_scsi_dev_scan_tgt(struct virtio_scsi_scan_base *base, uint8_t target);
386 static int _virtio_scsi_dev_scan_next(struct virtio_scsi_scan_base *base, int rc);
387 static void _virtio_scsi_dev_scan_finish(struct virtio_scsi_scan_base *base, int errnum);
388 static int virtio_scsi_dev_scan_tgt(struct virtio_scsi_dev *svdev, uint8_t target);
389 
390 static int
391 bdev_virtio_get_ctx_size(void)
392 {
393 	return sizeof(struct virtio_scsi_io_ctx);
394 }
395 
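/*
 * Emit one "bdev_virtio_attach_controller" RPC entry per attached device so a
 * saved config can recreate it. Illustrative shape of a single entry (the
 * transport-specific keys are produced by the backend's write_json_config
 * callback and differ between PCI and vhost-user):
 *
 *   {
 *     "method": "bdev_virtio_attach_controller",
 *     "params": { "name": "VirtioScsi0", "dev_type": "scsi", ... }
 *   }
 */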
396 static int
397 bdev_virtio_scsi_config_json(struct spdk_json_write_ctx *w)
398 {
399 	struct virtio_scsi_dev *svdev;
400 
401 	pthread_mutex_lock(&g_virtio_scsi_mutex);
402 	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
403 		spdk_json_write_object_begin(w);
404 
405 		spdk_json_write_named_string(w, "method", "bdev_virtio_attach_controller");
406 
407 		spdk_json_write_named_object_begin(w, "params");
408 		spdk_json_write_named_string(w, "name", svdev->vdev.name);
409 		spdk_json_write_named_string(w, "dev_type", "scsi");
410 
411 		/* Write transport specific parameters. */
412 		svdev->vdev.backend_ops->write_json_config(&svdev->vdev, w);
413 
414 		spdk_json_write_object_end(w);
415 
416 		spdk_json_write_object_end(w);
417 
418 	}
419 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
420 
421 	return 0;
422 }
423 
424 
425 static struct spdk_bdev_module virtio_scsi_if = {
426 	.name = "virtio_scsi",
427 	.module_init = bdev_virtio_initialize,
428 	.module_fini = bdev_virtio_finish,
429 	.get_ctx_size = bdev_virtio_get_ctx_size,
430 	.config_json = bdev_virtio_scsi_config_json,
431 	.async_fini = true,
432 };
433 
434 SPDK_BDEV_MODULE_REGISTER(virtio_scsi, &virtio_scsi_if)
435 
436 static struct virtio_scsi_io_ctx *
437 bdev_virtio_init_io_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
438 {
439 	struct virtio_scsi_cmd_req *req;
440 	struct virtio_scsi_cmd_resp *resp;
441 	struct virtio_scsi_disk *disk = (struct virtio_scsi_disk *)bdev_io->bdev;
442 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
443 
444 	req = &io_ctx->req;
445 	resp = &io_ctx->resp;
446 
447 	io_ctx->iov_req.iov_base = req;
448 	io_ctx->iov_req.iov_len = sizeof(*req);
449 
450 	io_ctx->iov_resp.iov_base = resp;
451 	io_ctx->iov_resp.iov_len = sizeof(*resp);
452 
453 	memset(req, 0, sizeof(*req));
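	/*
	 * virtio-scsi LUN addressing: byte 0 is always 1, byte 1 is the target
	 * id, and the remaining bytes stay zero here, i.e. LUN 0 on that target.
	 */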
454 	req->lun[0] = 1;
455 	req->lun[1] = disk->info.target;
456 
457 	return io_ctx;
458 }
459 
460 static struct virtio_scsi_io_ctx *
461 bdev_virtio_init_tmf_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
462 {
463 	struct virtio_scsi_ctrl_tmf_req *tmf_req;
464 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
465 	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
466 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
467 
468 	tmf_req = &io_ctx->tmf_req;
469 	tmf_resp = &io_ctx->tmf_resp;
470 
471 	io_ctx->iov_req.iov_base = tmf_req;
472 	io_ctx->iov_req.iov_len = sizeof(*tmf_req);
473 	io_ctx->iov_resp.iov_base = tmf_resp;
474 	io_ctx->iov_resp.iov_len = sizeof(*tmf_resp);
475 
476 	memset(tmf_req, 0, sizeof(*tmf_req));
477 	tmf_req->lun[0] = 1;
478 	tmf_req->lun[1] = disk->info.target;
479 
480 	return io_ctx;
481 }
482 
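/*
 * Queue a single SCSI command. The request header always goes first as a
 * device-readable descriptor. For writes, the device-readable data buffers
 * follow and the device-writable response comes last; for reads, the response
 * precedes the device-writable data buffers, keeping all device-readable
 * descriptors ahead of the device-writable ones.
 */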
483 static void
484 bdev_virtio_send_io(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
485 {
486 	struct bdev_virtio_io_channel *virtio_channel = spdk_io_channel_get_ctx(ch);
487 	struct virtqueue *vq = virtio_channel->vq;
488 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
489 	int rc;
490 
491 	rc = virtqueue_req_start(vq, bdev_io, bdev_io->u.bdev.iovcnt + 2);
492 	if (rc == -ENOMEM) {
493 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
494 		return;
495 	} else if (rc != 0) {
496 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
497 		return;
498 	}
499 
500 	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
501 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
502 		virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
503 		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
504 				       SPDK_VIRTIO_DESC_WR);
505 	} else {
506 		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
507 				       SPDK_VIRTIO_DESC_RO);
508 		virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
509 	}
510 
511 	virtqueue_req_flush(vq);
512 }
513 
514 static void
515 bdev_virtio_rw(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
516 {
517 	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
518 	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_io_vreq(ch, bdev_io);
519 	struct virtio_scsi_cmd_req *req = &io_ctx->req;
520 	bool is_write = bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE;
521 
522 	if (disk->info.num_blocks > (1ULL << 32)) {
523 		req->cdb[0] = is_write ? SPDK_SBC_WRITE_16 : SPDK_SBC_READ_16;
524 		to_be64(&req->cdb[2], bdev_io->u.bdev.offset_blocks);
525 		to_be32(&req->cdb[10], bdev_io->u.bdev.num_blocks);
526 	} else {
527 		req->cdb[0] = is_write ? SPDK_SBC_WRITE_10 : SPDK_SBC_READ_10;
528 		to_be32(&req->cdb[2], bdev_io->u.bdev.offset_blocks);
529 		to_be16(&req->cdb[7], bdev_io->u.bdev.num_blocks);
530 	}
531 
532 	bdev_virtio_send_io(ch, bdev_io);
533 }
534 
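/*
 * Resets are issued as LUN RESET task-management requests on the controlq.
 * The controlq is serviced by the management poller's thread, so the bdev_io
 * is handed over through the MP/SC ctrlq_ring and actually submitted from
 * bdev_virtio_mgmt_poll().
 */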
535 static void
536 bdev_virtio_reset(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
537 {
538 	struct bdev_virtio_io_channel *virtio_ch = spdk_io_channel_get_ctx(ch);
539 	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_tmf_vreq(ch, bdev_io);
540 	struct virtio_scsi_ctrl_tmf_req *tmf_req = &io_ctx->tmf_req;
541 	struct virtio_scsi_dev *svdev = virtio_ch->svdev;
542 	size_t enqueued_count;
543 
544 	tmf_req->type = VIRTIO_SCSI_T_TMF;
545 	tmf_req->subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;
546 
547 	enqueued_count = spdk_ring_enqueue(svdev->ctrlq_ring, (void **)&bdev_io, 1, NULL);
548 	if (spdk_likely(enqueued_count == 1)) {
549 		return;
550 	} else {
551 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
552 	}
553 }
554 
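/*
 * Translate the unmap request into a SCSI UNMAP command. The payload is an
 * 8-byte parameter list header followed by 16-byte block descriptors: one per
 * full UINT32_MAX-block chunk plus one for the remainder, since a single
 * descriptor only holds a 32-bit block count. For example, unmapping
 * UINT32_MAX + 10 blocks produces two descriptors, one covering UINT32_MAX
 * blocks and one covering the last 10.
 */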
555 static void
556 bdev_virtio_unmap(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
557 {
558 	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_io_vreq(ch, bdev_io);
559 	struct virtio_scsi_cmd_req *req = &io_ctx->req;
560 	struct spdk_scsi_unmap_bdesc *desc, *first_desc;
561 	uint8_t *buf;
562 	uint64_t offset_blocks, num_blocks;
563 	uint16_t cmd_len;
564 
565 	if (!success) {
566 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
567 		return;
568 	}
569 
570 	buf = bdev_io->u.bdev.iovs[0].iov_base;
571 
572 	offset_blocks = bdev_io->u.bdev.offset_blocks;
573 	num_blocks = bdev_io->u.bdev.num_blocks;
574 
575 	/* All but the last 16-byte block descriptor; each covers UINT32_MAX blocks */
576 	first_desc = desc = (struct spdk_scsi_unmap_bdesc *)&buf[8];
577 	while (num_blocks > UINT32_MAX) {
578 		to_be64(&desc->lba, offset_blocks);
579 		to_be32(&desc->block_count, UINT32_MAX);
580 		memset(&desc->reserved, 0, sizeof(desc->reserved));
581 		offset_blocks += UINT32_MAX;
582 		num_blocks -= UINT32_MAX;
583 		desc++;
584 	}
585 
586 	/* The last descriptor with block_count <= UINT32_MAX */
587 	to_be64(&desc->lba, offset_blocks);
588 	to_be32(&desc->block_count, num_blocks);
589 	memset(&desc->reserved, 0, sizeof(desc->reserved));
590 
591 	/* 8-byte header + n * 16-byte block descriptor */
592 	cmd_len = 8 + (desc - first_desc + 1) *  sizeof(struct spdk_scsi_unmap_bdesc);
593 
594 	req->cdb[0] = SPDK_SBC_UNMAP;
595 	to_be16(&req->cdb[7], cmd_len);
596 
597 	/* 8-byte header */
598 	to_be16(&buf[0], cmd_len - 2); /* total length (excluding the length field) */
599 	to_be16(&buf[2], cmd_len - 8); /* length of block descriptors */
600 	memset(&buf[4], 0, 4); /* reserved */
601 
602 	bdev_virtio_send_io(ch, bdev_io);
603 }
604 
605 static void
606 bdev_virtio_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
607 		       bool success)
608 {
609 	if (!success) {
610 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
611 		return;
612 	}
613 
614 	bdev_virtio_rw(ch, bdev_io);
615 }
616 
617 static int
618 _bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
619 {
620 	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
621 
622 	switch (bdev_io->type) {
623 	case SPDK_BDEV_IO_TYPE_READ:
624 		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_get_buf_cb,
625 				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
626 		return 0;
627 	case SPDK_BDEV_IO_TYPE_WRITE:
628 		bdev_virtio_rw(ch, bdev_io);
629 		return 0;
630 	case SPDK_BDEV_IO_TYPE_RESET:
631 		bdev_virtio_reset(ch, bdev_io);
632 		return 0;
633 	case SPDK_BDEV_IO_TYPE_UNMAP: {
634 		uint64_t buf_len = 8 /* header size */ +
635 				   (bdev_io->u.bdev.num_blocks + UINT32_MAX - 1) /
636 				   UINT32_MAX * sizeof(struct spdk_scsi_unmap_bdesc);
637 
638 		if (!disk->info.unmap_supported) {
639 			return -1;
640 		}
641 
642 		if (buf_len > SPDK_BDEV_LARGE_BUF_MAX_SIZE) {
643 			SPDK_ERRLOG("Trying to UNMAP too many blocks: %"PRIu64"\n",
644 				    bdev_io->u.bdev.num_blocks);
645 			return -1;
646 		}
647 		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_unmap, buf_len);
648 		return 0;
649 	}
650 	case SPDK_BDEV_IO_TYPE_FLUSH:
651 	default:
652 		return -1;
653 	}
654 	return 0;
655 }
656 
657 static void
658 bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
659 {
660 	if (_bdev_virtio_submit_request(ch, bdev_io) < 0) {
661 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
662 	}
663 }
664 
665 static bool
666 bdev_virtio_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
667 {
668 	struct virtio_scsi_disk *disk = ctx;
669 
670 	switch (io_type) {
671 	case SPDK_BDEV_IO_TYPE_READ:
672 	case SPDK_BDEV_IO_TYPE_WRITE:
673 	case SPDK_BDEV_IO_TYPE_FLUSH:
674 	case SPDK_BDEV_IO_TYPE_RESET:
675 		return true;
676 
677 	case SPDK_BDEV_IO_TYPE_UNMAP:
678 		return disk->info.unmap_supported;
679 
680 	default:
681 		return false;
682 	}
683 }
684 
685 static struct spdk_io_channel *
686 bdev_virtio_get_io_channel(void *ctx)
687 {
688 	struct virtio_scsi_disk *disk = ctx;
689 
690 	return spdk_get_io_channel(disk->svdev);
691 }
692 
693 static int
694 bdev_virtio_disk_destruct(void *ctx)
695 {
696 	struct virtio_scsi_disk *disk = ctx;
697 	struct virtio_scsi_dev *svdev = disk->svdev;
698 
699 	TAILQ_REMOVE(&svdev->luns, disk, link);
700 	free(disk->bdev.name);
701 	free(disk);
702 
703 	if (svdev->removed && TAILQ_EMPTY(&svdev->luns)) {
704 		spdk_io_device_unregister(svdev, virtio_scsi_dev_unregister_cb);
705 	}
706 
707 	return 0;
708 }
709 
710 static int
711 bdev_virtio_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
712 {
713 	struct virtio_scsi_disk *disk = ctx;
714 
715 	virtio_dev_dump_json_info(&disk->svdev->vdev, w);
716 	return 0;
717 }
718 
719 static void
720 bdev_virtio_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
721 {
722 	/* SCSI targets and LUNs are discovered during the scan process, so there
723 	 * is nothing to save here.
724 	 */
725 }
726 
727 static const struct spdk_bdev_fn_table virtio_fn_table = {
728 	.destruct		= bdev_virtio_disk_destruct,
729 	.submit_request		= bdev_virtio_submit_request,
730 	.io_type_supported	= bdev_virtio_io_type_supported,
731 	.get_io_channel		= bdev_virtio_get_io_channel,
732 	.dump_info_json		= bdev_virtio_dump_info_json,
733 	.write_config_json	= bdev_virtio_write_config_json,
734 };
735 
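/*
 * Decode fixed-format sense data: the sense key is in the low nibble of byte
 * 2, the additional sense code in byte 12 and its qualifier in byte 13.
 * Fields not covered by sense_len are reported as zero.
 */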
736 static void
737 get_scsi_status(struct virtio_scsi_cmd_resp *resp, int *sk, int *asc, int *ascq)
738 {
739 	/* see spdk_scsi_task_build_sense_data() for sense data details */
740 	*sk = 0;
741 	*asc = 0;
742 	*ascq = 0;
743 
744 	if (resp->sense_len < 3) {
745 		return;
746 	}
747 
748 	*sk = resp->sense[2] & 0xf;
749 
750 	if (resp->sense_len < 13) {
751 		return;
752 	}
753 
754 	*asc = resp->sense[12];
755 
756 	if (resp->sense_len < 14) {
757 		return;
758 	}
759 
760 	*ascq = resp->sense[13];
761 }
762 
763 static void
764 bdev_virtio_io_cpl(struct spdk_bdev_io *bdev_io)
765 {
766 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
767 	int sk, asc, ascq;
768 
769 	get_scsi_status(&io_ctx->resp, &sk, &asc, &ascq);
770 	spdk_bdev_io_complete_scsi_status(bdev_io, io_ctx->resp.status, sk, asc, ascq);
771 }
772 
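/*
 * Per-channel request queue poller. Completions that belong to an in-flight
 * target scan are fed back into the scan state machine; everything else is
 * completed as a regular bdev I/O. A scan I/O that previously failed to fit
 * into the virtqueue is also retried here once descriptors are freed up.
 */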
773 static int
774 bdev_virtio_poll(void *arg)
775 {
776 	struct bdev_virtio_io_channel *ch = arg;
777 	struct virtio_scsi_dev *svdev = ch->svdev;
778 	struct virtio_scsi_scan_base *scan_ctx = svdev->scan_ctx;
779 	void *io[32];
780 	uint32_t io_len[32];
781 	uint16_t i, cnt;
782 	int rc;
783 
784 	cnt = virtio_recv_pkts(ch->vq, (void **)io, io_len, SPDK_COUNTOF(io));
785 	for (i = 0; i < cnt; ++i) {
786 		if (spdk_unlikely(scan_ctx && io[i] == &scan_ctx->io_ctx)) {
787 			if (svdev->removed) {
788 				_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
789 				return SPDK_POLLER_BUSY;
790 			}
791 
792 			if (scan_ctx->restart) {
793 				scan_ctx->restart = false;
794 				scan_ctx->full_scan = true;
795 				_virtio_scsi_dev_scan_tgt(scan_ctx, 0);
796 				continue;
797 			}
798 
799 			process_scan_resp(scan_ctx);
800 			continue;
801 		}
802 
803 		bdev_virtio_io_cpl(io[i]);
804 	}
805 
806 	if (spdk_unlikely(scan_ctx && scan_ctx->needs_resend)) {
807 		if (svdev->removed) {
808 			_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
809 			return SPDK_POLLER_BUSY;
810 		} else if (cnt == 0) {
811 			return SPDK_POLLER_IDLE;
812 		}
813 
814 		rc = send_scan_io(scan_ctx);
815 		if (rc != 0) {
816 			assert(scan_ctx->retries > 0);
817 			scan_ctx->retries--;
818 			if (scan_ctx->retries == 0) {
819 				SPDK_ERRLOG("Target scan failed unrecoverably with rc = %d.\n", rc);
820 				_virtio_scsi_dev_scan_finish(scan_ctx, rc);
821 			}
822 		}
823 	}
824 
825 	return cnt;
826 }
827 
828 static void
829 bdev_virtio_tmf_cpl_cb(void *ctx)
830 {
831 	struct spdk_bdev_io *bdev_io = ctx;
832 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
833 
834 	if (io_ctx->tmf_resp.response == VIRTIO_SCSI_S_OK) {
835 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
836 	} else {
837 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
838 	}
839 }
840 
841 static void
842 bdev_virtio_tmf_cpl(struct spdk_bdev_io *bdev_io)
843 {
844 	spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io), bdev_virtio_tmf_cpl_cb, bdev_io);
845 }
846 
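/*
 * Handle a hotplug event from the eventq: a RESCAN transport reset triggers a
 * scan of the reported target, a REMOVED reset unregisters the matching bdev,
 * and the events-missed flag forces a full device rescan. The eventq buffer
 * is resubmitted once the event has been processed.
 */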
847 static void
848 bdev_virtio_eventq_io_cpl(struct virtio_scsi_dev *svdev, struct virtio_scsi_eventq_io *io)
849 {
850 	struct virtio_scsi_event *ev = &io->ev;
851 	struct virtio_scsi_disk *disk;
852 
853 	if (ev->lun[0] != 1) {
854 		SPDK_WARNLOG("Received an event with invalid data layout.\n");
855 		goto out;
856 	}
857 
858 	if (ev->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
859 		ev->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
860 		virtio_scsi_dev_scan(svdev, NULL, NULL);
861 	}
862 
863 	switch (ev->event) {
864 	case VIRTIO_SCSI_T_NO_EVENT:
865 		break;
866 	case VIRTIO_SCSI_T_TRANSPORT_RESET:
867 		switch (ev->reason) {
868 		case VIRTIO_SCSI_EVT_RESET_RESCAN:
869 			virtio_scsi_dev_scan_tgt(svdev, ev->lun[1]);
870 			break;
871 		case VIRTIO_SCSI_EVT_RESET_REMOVED:
872 			disk = virtio_scsi_dev_get_disk_by_id(svdev, ev->lun[1]);
873 			if (disk != NULL) {
874 				spdk_bdev_unregister(&disk->bdev, NULL, NULL);
875 			}
876 			break;
877 		default:
878 			break;
879 		}
880 		break;
881 	default:
882 		break;
883 	}
884 
885 out:
886 	virtio_scsi_dev_send_eventq_io(svdev->vdev.vqs[VIRTIO_SCSI_EVENTQ], io);
887 }
888 
889 static void
890 bdev_virtio_tmf_abort_nomem_cb(void *ctx)
891 {
892 	struct spdk_bdev_io *bdev_io = ctx;
893 
894 	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
895 }
896 
897 static void
898 bdev_virtio_tmf_abort_ioerr_cb(void *ctx)
899 {
900 	struct spdk_bdev_io *bdev_io = ctx;
901 
902 	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
903 }
904 
905 static void
906 bdev_virtio_tmf_abort(struct spdk_bdev_io *bdev_io, int status)
907 {
908 	spdk_msg_fn fn;
909 
910 	if (status == -ENOMEM) {
911 		fn = bdev_virtio_tmf_abort_nomem_cb;
912 	} else {
913 		fn = bdev_virtio_tmf_abort_ioerr_cb;
914 	}
915 
916 	spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io), fn, bdev_io);
917 }
918 
919 static int
920 bdev_virtio_send_tmf_io(struct virtqueue *ctrlq, struct spdk_bdev_io *bdev_io)
921 {
922 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
923 	int rc;
924 
925 	rc = virtqueue_req_start(ctrlq, bdev_io, 2);
926 	if (rc != 0) {
927 		return rc;
928 	}
929 
930 	virtqueue_req_add_iovs(ctrlq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
931 	virtqueue_req_add_iovs(ctrlq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
932 
933 	virtqueue_req_flush(ctrlq);
934 	return 0;
935 }
936 
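/*
 * Management poller, running every MGMT_POLL_PERIOD_US. It drains the
 * ctrlq_ring and submits the queued task-management requests to the controlq,
 * completes finished controlq requests, and processes incoming eventq
 * notifications.
 */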
937 static int
938 bdev_virtio_mgmt_poll(void *arg)
939 {
940 	struct virtio_scsi_dev *svdev = arg;
941 	struct virtio_dev *vdev = &svdev->vdev;
942 	struct virtqueue *eventq = vdev->vqs[VIRTIO_SCSI_EVENTQ];
943 	struct virtqueue *ctrlq = vdev->vqs[VIRTIO_SCSI_CONTROLQ];
944 	struct spdk_ring *send_ring = svdev->ctrlq_ring;
945 	void *io[16];
946 	uint32_t io_len[16];
947 	uint16_t i, cnt;
948 	int rc;
949 	int total = 0;
950 
951 	cnt = spdk_ring_dequeue(send_ring, io, SPDK_COUNTOF(io));
952 	total += cnt;
953 	for (i = 0; i < cnt; ++i) {
954 		rc = bdev_virtio_send_tmf_io(ctrlq, io[i]);
955 		if (rc != 0) {
956 			bdev_virtio_tmf_abort(io[i], rc);
957 		}
958 	}
959 
960 	cnt = virtio_recv_pkts(ctrlq, io, io_len, SPDK_COUNTOF(io));
961 	total += cnt;
962 	for (i = 0; i < cnt; ++i) {
963 		bdev_virtio_tmf_cpl(io[i]);
964 	}
965 
966 	cnt = virtio_recv_pkts(eventq, io, io_len, SPDK_COUNTOF(io));
967 	total += cnt;
968 	for (i = 0; i < cnt; ++i) {
969 		bdev_virtio_eventq_io_cpl(svdev, io[i]);
970 	}
971 
972 	return total;
973 }
974 
975 static int
976 bdev_virtio_scsi_ch_create_cb(void *io_device, void *ctx_buf)
977 {
978 	struct virtio_scsi_dev *svdev = io_device;
979 	struct virtio_dev *vdev = &svdev->vdev;
980 	struct bdev_virtio_io_channel *ch = ctx_buf;
981 	struct virtqueue *vq;
982 	int32_t queue_idx;
983 
984 	queue_idx = virtio_dev_find_and_acquire_queue(vdev, VIRTIO_SCSI_REQUESTQ);
985 	if (queue_idx < 0) {
986 		SPDK_ERRLOG("Couldn't get an unused queue for the io_channel.\n");
987 		return -1;
988 	}
989 
990 	vq = vdev->vqs[queue_idx];
991 
992 	ch->svdev = svdev;
993 	ch->vq = vq;
994 
995 	ch->poller = SPDK_POLLER_REGISTER(bdev_virtio_poll, ch, 0);
996 
997 	return 0;
998 }
999 
1000 static void
1001 bdev_virtio_scsi_ch_destroy_cb(void *io_device, void *ctx_buf)
1002 {
1003 	struct bdev_virtio_io_channel *ch = ctx_buf;
1004 	struct virtio_scsi_dev *svdev = ch->svdev;
1005 	struct virtio_dev *vdev = &svdev->vdev;
1006 	struct virtqueue *vq = ch->vq;
1007 
1008 	spdk_poller_unregister(&ch->poller);
1009 	virtio_dev_release_queue(vdev, vq->vq_queue_index);
1010 }
1011 
1012 static void
1013 _virtio_scsi_dev_scan_finish(struct virtio_scsi_scan_base *base, int errnum)
1014 {
1015 	struct virtio_scsi_dev *svdev = base->svdev;
1016 	size_t bdevs_cnt;
1017 	struct spdk_bdev *bdevs[BDEV_VIRTIO_MAX_TARGET];
1018 	struct virtio_scsi_disk *disk;
1019 	struct virtio_scsi_scan_info *tgt, *next_tgt;
1020 
1021 	spdk_put_io_channel(spdk_io_channel_from_ctx(base->channel));
1022 	base->svdev->scan_ctx = NULL;
1023 
1024 	TAILQ_FOREACH_SAFE(tgt, &base->scan_queue, tailq, next_tgt) {
1025 		TAILQ_REMOVE(&base->scan_queue, tgt, tailq);
1026 		free(tgt);
1027 	}
1028 
1029 	if (base->cb_fn == NULL) {
1030 		spdk_free(base);
1031 		return;
1032 	}
1033 
1034 	bdevs_cnt = 0;
1035 	if (errnum == 0) {
1036 		TAILQ_FOREACH(disk, &svdev->luns, link) {
1037 			bdevs[bdevs_cnt] = &disk->bdev;
1038 			bdevs_cnt++;
1039 		}
1040 	}
1041 
1042 	base->cb_fn(base->cb_arg, errnum, bdevs, bdevs_cnt);
1043 	spdk_free(base);
1044 }
1045 
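/*
 * Target scan state machine, driven by process_scan_resp(). For each target
 * the sequence is: standard INQUIRY -> TEST UNIT READY (with START STOP UNIT
 * if the unit reports "not ready") -> INQUIRY VPD supported pages ->
 * optionally INQUIRY VPD block thin provisioning -> READ CAPACITY(10), with a
 * fallback to READ CAPACITY(16) when the capacity does not fit in 32 bits.
 * A successful sequence ends with virtio_scsi_dev_add_tgt().
 */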
1046 static int
1047 send_scan_io(struct virtio_scsi_scan_base *base)
1048 {
1049 	struct virtio_scsi_io_ctx *io_ctx = &base->io_ctx;
1050 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1051 	struct virtqueue *vq = base->channel->vq;
1052 	int payload_iov_cnt = base->iov.iov_len > 0 ? 1 : 0;
1053 	int rc;
1054 
1055 	req->lun[0] = 1;
1056 	req->lun[1] = base->info.target;
1057 
1058 	rc = virtqueue_req_start(vq, io_ctx, 2 + payload_iov_cnt);
1059 	if (rc != 0) {
1060 		base->needs_resend = true;
1061 		return -1;
1062 	}
1063 
1064 	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
1065 	virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
1066 	virtqueue_req_add_iovs(vq, &base->iov, payload_iov_cnt, SPDK_VIRTIO_DESC_WR);
1067 
1068 	virtqueue_req_flush(vq);
1069 	return 0;
1070 }
1071 
1072 static int
1073 send_inquiry(struct virtio_scsi_scan_base *base)
1074 {
1075 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1076 	struct spdk_scsi_cdb_inquiry *cdb;
1077 
1078 	memset(req, 0, sizeof(*req));
1079 
1080 	base->iov.iov_len = BDEV_VIRTIO_SCAN_PAYLOAD_SIZE;
1081 	cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;
1082 	cdb->opcode = SPDK_SPC_INQUIRY;
1083 	to_be16(cdb->alloc_len, BDEV_VIRTIO_SCAN_PAYLOAD_SIZE);
1084 
1085 	return send_scan_io(base);
1086 }
1087 
1088 static int
1089 send_inquiry_vpd(struct virtio_scsi_scan_base *base, uint8_t page_code)
1090 {
1091 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1092 	struct spdk_scsi_cdb_inquiry *inquiry_cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;
1093 
1094 	memset(req, 0, sizeof(*req));
1095 
1096 	base->iov.iov_len = BDEV_VIRTIO_SCAN_PAYLOAD_SIZE;
1097 	inquiry_cdb->opcode = SPDK_SPC_INQUIRY;
1098 	inquiry_cdb->evpd = 1;
1099 	inquiry_cdb->page_code = page_code;
1100 	to_be16(inquiry_cdb->alloc_len, base->iov.iov_len);
1101 
1102 	return send_scan_io(base);
1103 }
1104 
1105 static int
1106 send_read_cap_10(struct virtio_scsi_scan_base *base)
1107 {
1108 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1109 
1110 	memset(req, 0, sizeof(*req));
1111 
1112 	base->iov.iov_len = 8;
1113 	req->cdb[0] = SPDK_SBC_READ_CAPACITY_10;
1114 
1115 	return send_scan_io(base);
1116 }
1117 
1118 static int
1119 send_read_cap_16(struct virtio_scsi_scan_base *base)
1120 {
1121 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1122 
1123 	memset(req, 0, sizeof(*req));
1124 
1125 	base->iov.iov_len = 32;
1126 	req->cdb[0] = SPDK_SPC_SERVICE_ACTION_IN_16;
1127 	req->cdb[1] = SPDK_SBC_SAI_READ_CAPACITY_16;
1128 	to_be32(&req->cdb[10], base->iov.iov_len);
1129 
1130 	return send_scan_io(base);
1131 }
1132 
1133 static int
1134 send_test_unit_ready(struct virtio_scsi_scan_base *base)
1135 {
1136 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1137 
1138 	memset(req, 0, sizeof(*req));
1139 	req->cdb[0] = SPDK_SPC_TEST_UNIT_READY;
1140 	base->iov.iov_len = 0;
1141 
1142 	return send_scan_io(base);
1143 }
1144 
1145 static int
1146 send_start_stop_unit(struct virtio_scsi_scan_base *base)
1147 {
1148 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1149 
1150 	memset(req, 0, sizeof(*req));
1151 	req->cdb[0] = SPDK_SBC_START_STOP_UNIT;
1152 	req->cdb[4] = SPDK_SBC_START_STOP_UNIT_START_BIT;
1153 	base->iov.iov_len = 0;
1154 
1155 	return send_scan_io(base);
1156 }
1157 
1158 static int
1159 process_scan_start_stop_unit(struct virtio_scsi_scan_base *base)
1160 {
1161 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1162 
1163 	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
1164 		return send_inquiry_vpd(base, SPDK_SPC_VPD_SUPPORTED_VPD_PAGES);
1165 	}
1166 
1167 	return -1;
1168 }
1169 
1170 static int
1171 process_scan_test_unit_ready(struct virtio_scsi_scan_base *base)
1172 {
1173 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1174 	int sk, asc, ascq;
1175 
1176 	get_scsi_status(resp, &sk, &asc, &ascq);
1177 
1178 	/* Check the response: query VPD pages if the unit is spun up, otherwise send START STOP UNIT */
1179 	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
1180 		return send_inquiry_vpd(base, SPDK_SPC_VPD_SUPPORTED_VPD_PAGES);
1181 	} else if (resp->response == VIRTIO_SCSI_S_OK &&
1182 		   resp->status == SPDK_SCSI_STATUS_CHECK_CONDITION &&
1183 		   sk == SPDK_SCSI_SENSE_UNIT_ATTENTION &&
1184 		   asc == SPDK_SCSI_ASC_LOGICAL_UNIT_NOT_READY) {
1185 		return send_start_stop_unit(base);
1186 	} else {
1187 		return -1;
1188 	}
1189 }
1190 
1191 static int
1192 process_scan_inquiry_standard(struct virtio_scsi_scan_base *base)
1193 {
1194 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1195 	struct spdk_scsi_cdb_inquiry_data *inquiry_data =
1196 		(struct spdk_scsi_cdb_inquiry_data *)base->payload;
1197 
1198 	if (resp->status != SPDK_SCSI_STATUS_GOOD) {
1199 		return -1;
1200 	}
1201 
1202 	/* check to make sure it's a supported device */
1203 	if (inquiry_data->peripheral_device_type != SPDK_SPC_PERIPHERAL_DEVICE_TYPE_DISK ||
1204 	    inquiry_data->peripheral_qualifier != SPDK_SPC_PERIPHERAL_QUALIFIER_CONNECTED) {
1205 		SPDK_WARNLOG("Unsupported peripheral device type 0x%02x (qualifier 0x%02x)\n",
1206 			     inquiry_data->peripheral_device_type,
1207 			     inquiry_data->peripheral_qualifier);
1208 		return -1;
1209 	}
1210 
1211 	return send_test_unit_ready(base);
1212 }
1213 
1214 static int
1215 process_scan_inquiry_vpd_supported_vpd_pages(struct virtio_scsi_scan_base *base)
1216 {
1217 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1218 	bool block_provisioning_page_supported = false;
1219 
1220 	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
1221 		const uint8_t *vpd_data = base->payload;
1222 		const uint8_t *supported_vpd_pages = vpd_data + 4;
1223 		uint16_t page_length;
1224 		uint16_t num_supported_pages;
1225 		uint16_t i;
1226 
1227 		page_length = from_be16(vpd_data + 2);
1228 		num_supported_pages = spdk_min(page_length, base->iov.iov_len - 4);
1229 
1230 		for (i = 0; i < num_supported_pages; i++) {
1231 			if (supported_vpd_pages[i] == SPDK_SPC_VPD_BLOCK_THIN_PROVISION) {
1232 				block_provisioning_page_supported = true;
1233 				break;
1234 			}
1235 		}
1236 	}
1237 
1238 	if (block_provisioning_page_supported) {
1239 		return send_inquiry_vpd(base, SPDK_SPC_VPD_BLOCK_THIN_PROVISION);
1240 	} else {
1241 		return send_read_cap_10(base);
1242 	}
1243 }
1244 
1245 static int
1246 process_scan_inquiry_vpd_block_thin_provision(struct virtio_scsi_scan_base *base)
1247 {
1248 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1249 
1250 	base->info.unmap_supported = false;
1251 
1252 	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
1253 		uint8_t *vpd_data = base->payload;
1254 
1255 		base->info.unmap_supported = !!(vpd_data[5] & SPDK_SCSI_UNMAP_LBPU);
1256 	}
1257 
1258 	SPDK_INFOLOG(virtio, "Target %u: unmap supported = %d\n",
1259 		     base->info.target, (int)base->info.unmap_supported);
1260 
1261 	return send_read_cap_10(base);
1262 }
1263 
1264 static int
1265 process_scan_inquiry(struct virtio_scsi_scan_base *base)
1266 {
1267 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1268 	struct spdk_scsi_cdb_inquiry *inquiry_cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;
1269 
1270 	if ((inquiry_cdb->evpd & 1) == 0) {
1271 		return process_scan_inquiry_standard(base);
1272 	}
1273 
1274 	switch (inquiry_cdb->page_code) {
1275 	case SPDK_SPC_VPD_SUPPORTED_VPD_PAGES:
1276 		return process_scan_inquiry_vpd_supported_vpd_pages(base);
1277 	case SPDK_SPC_VPD_BLOCK_THIN_PROVISION:
1278 		return process_scan_inquiry_vpd_block_thin_provision(base);
1279 	default:
1280 		SPDK_DEBUGLOG(virtio, "Unexpected VPD page 0x%02x\n", inquiry_cdb->page_code);
1281 		return -1;
1282 	}
1283 }
1284 
1285 static void
1286 bdev_virtio_disk_notify_remove(struct virtio_scsi_disk *disk)
1287 {
1288 	disk->removed = true;
1289 	spdk_bdev_close(disk->notify_desc);
1290 }
1291 
1292 static void
1293 bdev_virtio_disk_notify_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
1294 				 void *event_ctx)
1295 {
1296 	switch (type) {
1297 	case SPDK_BDEV_EVENT_REMOVE:
1298 		bdev_virtio_disk_notify_remove(event_ctx);
1299 		break;
1300 	default:
1301 		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
1302 		break;
1303 	}
1304 }
1305 
1306 /* To be called only from the thread performing target scan */
1307 static int
1308 virtio_scsi_dev_add_tgt(struct virtio_scsi_dev *svdev, struct virtio_scsi_scan_info *info)
1309 {
1310 	struct virtio_scsi_disk *disk;
1311 	struct spdk_bdev *bdev;
1312 	int rc;
1313 
1314 	TAILQ_FOREACH(disk, &svdev->luns, link) {
1315 		if (disk->info.target == info->target) {
1316 			/* Target is already attached and param change is not supported */
1317 			return 0;
1318 		}
1319 	}
1320 
1321 	if (info->block_size == 0 || info->num_blocks == 0) {
1322 		SPDK_ERRLOG("%s: invalid target %u: bs=%"PRIu32" blocks=%"PRIu64"\n",
1323 			    svdev->vdev.name, info->target, info->block_size, info->num_blocks);
1324 		return -EINVAL;
1325 	}
1326 
1327 	disk = calloc(1, sizeof(*disk));
1328 	if (disk == NULL) {
1329 		SPDK_ERRLOG("could not allocate disk\n");
1330 		return -ENOMEM;
1331 	}
1332 
1333 	disk->svdev = svdev;
1334 	memcpy(&disk->info, info, sizeof(*info));
1335 
1336 	bdev = &disk->bdev;
1337 	bdev->name = spdk_sprintf_alloc("%st%"PRIu8, svdev->vdev.name, info->target);
1338 	if (bdev->name == NULL) {
1339 		SPDK_ERRLOG("Couldn't alloc memory for the bdev name.\n");
1340 		free(disk);
1341 		return -ENOMEM;
1342 	}
1343 
1344 	bdev->product_name = "Virtio SCSI Disk";
1345 	bdev->write_cache = 0;
1346 	bdev->blocklen = disk->info.block_size;
1347 	bdev->blockcnt = disk->info.num_blocks;
1348 
1349 	bdev->ctxt = disk;
1350 	bdev->fn_table = &virtio_fn_table;
1351 	bdev->module = &virtio_scsi_if;
1352 
1353 	rc = spdk_bdev_register(&disk->bdev);
1354 	if (rc) {
1355 		SPDK_ERRLOG("Failed to register bdev name=%s\n", disk->bdev.name);
1356 		free(bdev->name);
1357 		free(disk);
1358 		return rc;
1359 	}
1360 
1361 	rc = spdk_bdev_open_ext(bdev->name, false, bdev_virtio_disk_notify_event_cb,
1362 				disk, &disk->notify_desc);
1363 	if (rc) {
1364 		assert(false);
1365 	}
1366 
1367 	TAILQ_INSERT_TAIL(&svdev->luns, disk, link);
1368 	return 0;
1369 }
1370 
1371 static int
1372 process_read_cap_10(struct virtio_scsi_scan_base *base)
1373 {
1374 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1375 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1376 	uint64_t max_block;
1377 	uint32_t block_size;
1378 	uint8_t target_id = req->lun[1];
1379 	int rc;
1380 
1381 	if (resp->response != VIRTIO_SCSI_S_OK || resp->status != SPDK_SCSI_STATUS_GOOD) {
1382 		SPDK_ERRLOG("READ CAPACITY (10) failed for target %"PRIu8".\n", target_id);
1383 		return -1;
1384 	}
1385 
1386 	block_size = from_be32(base->payload + 4);
1387 	max_block = from_be32(base->payload);
1388 
1389 	if (max_block == 0xffffffff) {
1390 		return send_read_cap_16(base);
1391 	}
1392 
1393 	base->info.num_blocks = (uint64_t)max_block + 1;
1394 	base->info.block_size = block_size;
1395 
1396 	rc = virtio_scsi_dev_add_tgt(base->svdev, &base->info);
1397 	if (rc != 0) {
1398 		return rc;
1399 	}
1400 
1401 	return _virtio_scsi_dev_scan_next(base, 0);
1402 }
1403 
1404 static int
1405 process_read_cap_16(struct virtio_scsi_scan_base *base)
1406 {
1407 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1408 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1409 	uint8_t target_id = req->lun[1];
1410 	int rc;
1411 
1412 	if (resp->response != VIRTIO_SCSI_S_OK || resp->status != SPDK_SCSI_STATUS_GOOD) {
1413 		SPDK_ERRLOG("READ CAPACITY (16) failed for target %"PRIu8".\n", target_id);
1414 		return -1;
1415 	}
1416 
1417 	base->info.num_blocks = from_be64(base->payload) + 1;
1418 	base->info.block_size = from_be32(base->payload + 8);
1419 	rc = virtio_scsi_dev_add_tgt(base->svdev, &base->info);
1420 	if (rc != 0) {
1421 		return rc;
1422 	}
1423 
1424 	return _virtio_scsi_dev_scan_next(base, 0);
1425 }
1426 
1427 static void
1428 process_scan_resp(struct virtio_scsi_scan_base *base)
1429 {
1430 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1431 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1432 	int rc, sk, asc, ascq;
1433 	uint8_t target_id;
1434 
1435 	if (base->io_ctx.iov_req.iov_len < sizeof(struct virtio_scsi_cmd_req) ||
1436 	    base->io_ctx.iov_resp.iov_len < sizeof(struct virtio_scsi_cmd_resp)) {
1437 		SPDK_ERRLOG("Received target scan message with invalid length.\n");
1438 		_virtio_scsi_dev_scan_next(base, -EIO);
1439 		return;
1440 	}
1441 
1442 	get_scsi_status(resp, &sk, &asc, &ascq);
1443 	target_id = req->lun[1];
1444 
1445 	if (resp->response == VIRTIO_SCSI_S_BAD_TARGET ||
1446 	    resp->response == VIRTIO_SCSI_S_INCORRECT_LUN) {
1447 		_virtio_scsi_dev_scan_next(base, -ENODEV);
1448 		return;
1449 	}
1450 
1451 	if (resp->response != VIRTIO_SCSI_S_OK ||
1452 	    (resp->status == SPDK_SCSI_STATUS_CHECK_CONDITION &&
1453 	     sk != SPDK_SCSI_SENSE_ILLEGAL_REQUEST)) {
1454 		assert(base->retries > 0);
1455 		base->retries--;
1456 		if (base->retries == 0) {
1457 			SPDK_NOTICELOG("Target %"PRIu8" is present, but unavailable.\n", target_id);
1458 			SPDK_LOGDUMP(virtio, "CDB", req->cdb, sizeof(req->cdb));
1459 			SPDK_LOGDUMP(virtio, "SENSE DATA", resp->sense, sizeof(resp->sense));
1460 			_virtio_scsi_dev_scan_next(base, -EBUSY);
1461 			return;
1462 		}
1463 
1464 		/* resend the same request */
1465 		rc = send_scan_io(base);
1466 		if (rc != 0) {
1467 			/* Let response poller do the resend */
1468 		}
1469 		return;
1470 	}
1471 
1472 	base->retries = SCAN_REQUEST_RETRIES;
1473 
1474 	switch (req->cdb[0]) {
1475 	case SPDK_SPC_INQUIRY:
1476 		rc = process_scan_inquiry(base);
1477 		break;
1478 	case SPDK_SPC_TEST_UNIT_READY:
1479 		rc = process_scan_test_unit_ready(base);
1480 		break;
1481 	case SPDK_SBC_START_STOP_UNIT:
1482 		rc = process_scan_start_stop_unit(base);
1483 		break;
1484 	case SPDK_SBC_READ_CAPACITY_10:
1485 		rc = process_read_cap_10(base);
1486 		break;
1487 	case SPDK_SPC_SERVICE_ACTION_IN_16:
1488 		rc = process_read_cap_16(base);
1489 		break;
1490 	default:
1491 		SPDK_ERRLOG("Received invalid target scan message: cdb[0] = %"PRIu8".\n", req->cdb[0]);
1492 		rc = -1;
1493 		break;
1494 	}
1495 
1496 	if (rc != 0) {
1497 		if (base->needs_resend) {
1498 			return; /* Let response poller do the resend */
1499 		}
1500 
1501 		_virtio_scsi_dev_scan_next(base, rc);
1502 	}
1503 }
1504 
1505 static int
1506 _virtio_scsi_dev_scan_next(struct virtio_scsi_scan_base *base, int rc)
1507 {
1508 	struct virtio_scsi_scan_info *next;
1509 	struct virtio_scsi_disk *disk;
1510 	uint8_t target_id;
1511 
1512 	if (base->full_scan) {
1513 		if (rc != 0) {
1514 			disk = virtio_scsi_dev_get_disk_by_id(base->svdev,
1515 							      base->info.target);
1516 			if (disk != NULL) {
1517 				spdk_bdev_unregister(&disk->bdev, NULL, NULL);
1518 			}
1519 		}
1520 
1521 		target_id = base->info.target + 1;
1522 		if (target_id < BDEV_VIRTIO_MAX_TARGET) {
1523 			_virtio_scsi_dev_scan_tgt(base, target_id);
1524 			return 0;
1525 		}
1526 
1527 		base->full_scan = false;
1528 	}
1529 
1530 	next = TAILQ_FIRST(&base->scan_queue);
1531 	if (next == NULL) {
1532 		_virtio_scsi_dev_scan_finish(base, 0);
1533 		return 0;
1534 	}
1535 
1536 	TAILQ_REMOVE(&base->scan_queue, next, tailq);
1537 	target_id = next->target;
1538 	free(next);
1539 
1540 	_virtio_scsi_dev_scan_tgt(base, target_id);
1541 	return 0;
1542 }
1543 
1544 static int
1545 _virtio_scsi_dev_scan_init(struct virtio_scsi_dev *svdev)
1546 {
1547 	struct virtio_scsi_scan_base *base;
1548 	struct spdk_io_channel *io_ch;
1549 	struct virtio_scsi_io_ctx *io_ctx;
1550 	struct virtio_scsi_cmd_req *req;
1551 	struct virtio_scsi_cmd_resp *resp;
1552 
1553 	io_ch = spdk_get_io_channel(svdev);
1554 	if (io_ch == NULL) {
1555 		return -EBUSY;
1556 	}
1557 
1558 	base = spdk_zmalloc(sizeof(*base), 64, NULL,
1559 			    SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1560 	if (base == NULL) {
1561 		SPDK_ERRLOG("couldn't allocate memory for scsi target scan.\n");
		spdk_put_io_channel(io_ch);
1562 		return -ENOMEM;
1563 	}
1564 
1565 	base->svdev = svdev;
1566 
1567 	base->channel = spdk_io_channel_get_ctx(io_ch);
1568 	TAILQ_INIT(&base->scan_queue);
1569 	svdev->scan_ctx = base;
1570 
1571 	base->iov.iov_base = base->payload;
1572 	io_ctx = &base->io_ctx;
1573 	req = &io_ctx->req;
1574 	resp = &io_ctx->resp;
1575 	io_ctx->iov_req.iov_base = req;
1576 	io_ctx->iov_req.iov_len = sizeof(*req);
1577 	io_ctx->iov_resp.iov_base = resp;
1578 	io_ctx->iov_resp.iov_len = sizeof(*resp);
1579 
1580 	base->retries = SCAN_REQUEST_RETRIES;
1581 	return 0;
1582 }
1583 
1584 static void
1585 _virtio_scsi_dev_scan_tgt(struct virtio_scsi_scan_base *base, uint8_t target)
1586 {
1587 	int rc;
1588 
1589 	memset(&base->info, 0, sizeof(base->info));
1590 	base->info.target = target;
1591 
1592 	rc = send_inquiry(base);
1593 	if (rc) {
1594 		/* Let response poller do the resend */
1595 	}
1596 }
1597 
1598 static int
1599 virtio_scsi_dev_scan(struct virtio_scsi_dev *svdev, bdev_virtio_create_cb cb_fn,
1600 		     void *cb_arg)
1601 {
1602 	struct virtio_scsi_scan_base *base;
1603 	struct virtio_scsi_scan_info *tgt, *next_tgt;
1604 	int rc;
1605 
1606 	if (svdev->scan_ctx) {
1607 		if (svdev->scan_ctx->full_scan) {
1608 			return -EEXIST;
1609 		}
1610 
1611 		/* We're about to start a full rescan, so there's no need
1612 		 * to scan particular targets afterwards.
1613 		 */
1614 		TAILQ_FOREACH_SAFE(tgt, &svdev->scan_ctx->scan_queue, tailq, next_tgt) {
1615 			TAILQ_REMOVE(&svdev->scan_ctx->scan_queue, tgt, tailq);
1616 			free(tgt);
1617 		}
1618 
1619 		svdev->scan_ctx->cb_fn = cb_fn;
1620 		svdev->scan_ctx->cb_arg = cb_arg;
1621 		svdev->scan_ctx->restart = true;
1622 		return 0;
1623 	}
1624 
1625 	rc = _virtio_scsi_dev_scan_init(svdev);
1626 	if (rc != 0) {
1627 		return rc;
1628 	}
1629 
1630 	base = svdev->scan_ctx;
1631 	base->cb_fn = cb_fn;
1632 	base->cb_arg = cb_arg;
1633 	base->full_scan = true;
1634 
1635 	_virtio_scsi_dev_scan_tgt(base, 0);
1636 	return 0;
1637 }
1638 
1639 static int
1640 virtio_scsi_dev_scan_tgt(struct virtio_scsi_dev *svdev, uint8_t target)
1641 {
1642 	struct virtio_scsi_scan_base *base;
1643 	struct virtio_scsi_scan_info *info;
1644 	int rc;
1645 
1646 	base = svdev->scan_ctx;
1647 	if (base) {
1648 		info = calloc(1, sizeof(*info));
1649 		if (info == NULL) {
1650 			SPDK_ERRLOG("calloc failed\n");
1651 			return -ENOMEM;
1652 		}
1653 
1654 		info->target = target;
1655 		TAILQ_INSERT_TAIL(&base->scan_queue, info, tailq);
1656 		return 0;
1657 	}
1658 
1659 	rc = _virtio_scsi_dev_scan_init(svdev);
1660 	if (rc != 0) {
1661 		return rc;
1662 	}
1663 
1664 	base = svdev->scan_ctx;
1665 	base->full_scan = true;
1666 	_virtio_scsi_dev_scan_tgt(base, target);
1667 	return 0;
1668 }
1669 
1670 static int
1671 bdev_virtio_initialize(void)
1672 {
1673 	return 0;
1674 }
1675 
1676 static void
1677 _virtio_scsi_dev_unregister_cb(void *io_device)
1678 {
1679 	struct virtio_scsi_dev *svdev = io_device;
1680 	struct virtio_dev *vdev = &svdev->vdev;
1681 	bool finish_module;
1682 	bdev_virtio_remove_cb remove_cb;
1683 	void *remove_ctx;
1684 
1685 	assert(spdk_ring_count(svdev->ctrlq_ring) == 0);
1686 	spdk_ring_free(svdev->ctrlq_ring);
1687 	spdk_poller_unregister(&svdev->mgmt_poller);
1688 
1689 	virtio_dev_release_queue(vdev, VIRTIO_SCSI_EVENTQ);
1690 	virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
1691 
1692 	virtio_dev_stop(vdev);
1693 	virtio_dev_destruct(vdev);
1694 
1695 	pthread_mutex_lock(&g_virtio_scsi_mutex);
1696 	TAILQ_REMOVE(&g_virtio_scsi_devs, svdev, tailq);
1697 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
1698 
1699 	remove_cb = svdev->remove_cb;
1700 	remove_ctx = svdev->remove_ctx;
1701 	spdk_free(svdev->eventq_ios);
1702 	free(svdev);
1703 
1704 	if (remove_cb) {
1705 		remove_cb(remove_ctx, 0);
1706 	}
1707 
1708 	finish_module = TAILQ_EMPTY(&g_virtio_scsi_devs);
1709 
1710 	if (g_bdev_virtio_finish && finish_module) {
1711 		spdk_bdev_module_fini_done();
1712 	}
1713 }
1714 
1715 static void
1716 virtio_scsi_dev_unregister_cb(void *io_device)
1717 {
1718 	struct virtio_scsi_dev *svdev = io_device;
1719 	struct spdk_thread *thread;
1720 
1721 	thread = virtio_dev_queue_get_thread(&svdev->vdev, VIRTIO_SCSI_CONTROLQ);
1722 	spdk_thread_send_msg(thread, _virtio_scsi_dev_unregister_cb, io_device);
1723 }
1724 
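/*
 * Begin tearing down a device: mark it removed and unregister each of its
 * bdevs. The io_device itself is unregistered either immediately (if there
 * are no LUNs) or from the destruct callback of the last remaining disk; if a
 * scan is in flight, removal is deferred until the pending scan I/O completes.
 */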
1725 static void
1726 virtio_scsi_dev_remove(struct virtio_scsi_dev *svdev,
1727 		       bdev_virtio_remove_cb cb_fn, void *cb_arg)
1728 {
1729 	struct virtio_scsi_disk *disk, *disk_tmp;
1730 	bool do_remove = true;
1731 
1732 	if (svdev->removed) {
1733 		if (cb_fn) {
1734 			cb_fn(cb_arg, -EBUSY);
1735 		}
1736 		return;
1737 	}
1738 
1739 	svdev->remove_cb = cb_fn;
1740 	svdev->remove_ctx = cb_arg;
1741 	svdev->removed = true;
1742 
1743 	if (svdev->scan_ctx) {
1744 		/* The removal will continue after we receive a pending scan I/O. */
1745 		return;
1746 	}
1747 
1748 	TAILQ_FOREACH_SAFE(disk, &svdev->luns, link, disk_tmp) {
1749 		if (!disk->removed) {
1750 			spdk_bdev_unregister(&disk->bdev, NULL, NULL);
1751 		}
1752 		do_remove = false;
1753 	}
1754 
1755 	if (do_remove) {
1756 		spdk_io_device_unregister(svdev, virtio_scsi_dev_unregister_cb);
1757 	}
1758 }
1759 
1760 static void
1761 bdev_virtio_finish(void)
1762 {
1763 	struct virtio_scsi_dev *svdev, *next;
1764 
1765 	g_bdev_virtio_finish = true;
1766 
1767 	pthread_mutex_lock(&g_virtio_scsi_mutex);
1768 	if (TAILQ_EMPTY(&g_virtio_scsi_devs)) {
1769 		pthread_mutex_unlock(&g_virtio_scsi_mutex);
1770 		spdk_bdev_module_fini_done();
1771 		return;
1772 	}
1773 
1774 	/* Defer module finish until all controllers are removed. */
1775 	TAILQ_FOREACH_SAFE(svdev, &g_virtio_scsi_devs, tailq, next) {
1776 		virtio_scsi_dev_remove(svdev, NULL, NULL);
1777 	}
1778 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
1779 }
1780 
1781 int
1782 bdev_virtio_user_scsi_dev_create(const char *base_name, const char *path,
1783 				 unsigned num_queues, unsigned queue_size,
1784 				 bdev_virtio_create_cb cb_fn, void *cb_arg)
1785 {
1786 	struct virtio_scsi_dev *svdev;
1787 	int rc;
1788 
1789 	svdev = virtio_user_scsi_dev_create(base_name, path, num_queues, queue_size);
1790 	if (svdev == NULL) {
1791 		return -1;
1792 	}
1793 
1794 	rc = virtio_scsi_dev_scan(svdev, cb_fn, cb_arg);
1795 	if (rc) {
1796 		virtio_scsi_dev_remove(svdev, NULL, NULL);
1797 	}
1798 
1799 	return rc;
1800 }
1801 
1802 struct bdev_virtio_pci_dev_create_ctx {
1803 	const char *name;
1804 	bdev_virtio_create_cb cb_fn;
1805 	void *cb_arg;
1806 };
1807 
1808 static int
1809 bdev_virtio_pci_scsi_dev_create_cb(struct virtio_pci_ctx *pci_ctx, void *ctx)
1810 {
1811 	struct virtio_scsi_dev *svdev;
1812 	struct bdev_virtio_pci_dev_create_ctx *create_ctx = ctx;
1813 	int rc;
1814 
1815 	svdev = virtio_pci_scsi_dev_create(create_ctx->name, pci_ctx);
1816 	if (svdev == NULL) {
1817 		return -1;
1818 	}
1819 
1820 	rc = virtio_scsi_dev_scan(svdev, create_ctx->cb_fn, create_ctx->cb_arg);
1821 	if (rc) {
1822 		svdev->vdev.ctx = NULL;
1823 		virtio_scsi_dev_remove(svdev, NULL, NULL);
1824 	}
1825 
1826 	return rc;
1827 }
1828 
1829 int
1830 bdev_virtio_pci_scsi_dev_create(const char *name, struct spdk_pci_addr *pci_addr,
1831 				bdev_virtio_create_cb cb_fn, void *cb_arg)
1832 {
1833 	struct bdev_virtio_pci_dev_create_ctx create_ctx;
1834 
1835 	create_ctx.name = name;
1836 	create_ctx.cb_fn = cb_fn;
1837 	create_ctx.cb_arg = cb_arg;
1838 
1839 	return virtio_pci_dev_attach(bdev_virtio_pci_scsi_dev_create_cb, &create_ctx,
1840 				     VIRTIO_ID_SCSI, pci_addr);
1841 }
1842 
1843 int
1844 bdev_virtio_scsi_dev_remove(const char *name, bdev_virtio_remove_cb cb_fn, void *cb_arg)
1845 {
1846 	struct virtio_scsi_dev *svdev;
1847 
1848 	pthread_mutex_lock(&g_virtio_scsi_mutex);
1849 	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
1850 		if (strcmp(svdev->vdev.name, name) == 0) {
1851 			break;
1852 		}
1853 	}
1854 
1855 	if (svdev == NULL) {
1856 		pthread_mutex_unlock(&g_virtio_scsi_mutex);
1857 		SPDK_ERRLOG("Cannot find Virtio-SCSI device named '%s'\n", name);
1858 		return -ENODEV;
1859 	}
1860 
1861 	virtio_scsi_dev_remove(svdev, cb_fn, cb_arg);
1862 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
1863 
1864 	return 0;
1865 }
1866 
1867 void
1868 bdev_virtio_scsi_dev_list(struct spdk_json_write_ctx *w)
1869 {
1870 	struct virtio_scsi_dev *svdev;
1871 
1872 	spdk_json_write_array_begin(w);
1873 
1874 	pthread_mutex_lock(&g_virtio_scsi_mutex);
1875 	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
1876 		spdk_json_write_object_begin(w);
1877 
1878 		spdk_json_write_named_string(w, "name", svdev->vdev.name);
1879 
1880 		virtio_dev_dump_json_info(&svdev->vdev, w);
1881 
1882 		spdk_json_write_object_end(w);
1883 	}
1884 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
1885 
1886 	spdk_json_write_array_end(w);
1887 }
1888 
1889 SPDK_LOG_REGISTER_COMPONENT(virtio)
1890