xref: /spdk/module/bdev/virtio/bdev_virtio_scsi.c (revision 488570ebd418ba07c9e69e65106dcc964f3bb41b)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 
8 #include "spdk/bdev.h"
9 #include "spdk/endian.h"
10 #include "spdk/env.h"
11 #include "spdk/thread.h"
12 #include "spdk/scsi_spec.h"
13 #include "spdk/string.h"
14 #include "spdk/util.h"
15 #include "spdk/json.h"
16 
17 #include "spdk/bdev_module.h"
18 #include "spdk/log.h"
19 #include "spdk_internal/virtio.h"
20 #include "spdk_internal/vhost_user.h"
21 
22 #include <linux/virtio_scsi.h>
23 #include <linux/virtio_ids.h>
24 
25 #include "bdev_virtio.h"
26 
27 #define BDEV_VIRTIO_MAX_TARGET 64
28 #define BDEV_VIRTIO_SCAN_PAYLOAD_SIZE 256
29 #define MGMT_POLL_PERIOD_US (1000 * 5)
30 #define CTRLQ_RING_SIZE 16
31 #define SCAN_REQUEST_RETRIES 5
32 
33 /* Number of non-request queues - eventq and controlq */
34 #define SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED 2
35 
36 #define VIRTIO_SCSI_EVENTQ_BUFFER_COUNT 16
37 
38 #define VIRTIO_SCSI_CONTROLQ	0
39 #define VIRTIO_SCSI_EVENTQ	1
40 #define VIRTIO_SCSI_REQUESTQ	2
41 
42 static int bdev_virtio_initialize(void);
43 static void bdev_virtio_finish(void);
44 
45 struct virtio_scsi_dev {
46 	/* Generic virtio device data. */
47 	struct virtio_dev		vdev;
48 
49 	/** Detected SCSI LUNs */
50 	TAILQ_HEAD(, virtio_scsi_disk)	luns;
51 
52 	/** Context for the SCSI target scan. */
53 	struct virtio_scsi_scan_base	*scan_ctx;
54 
55 	/** Controlq poller. */
56 	struct spdk_poller		*mgmt_poller;
57 
58 	/** Controlq messages to be sent. */
59 	struct spdk_ring		*ctrlq_ring;
60 
61 	/** Buffers for the eventq. */
62 	struct virtio_scsi_eventq_io	*eventq_ios;
63 
64 	/** Device marked for removal. */
65 	bool				removed;
66 
67 	/** Callback to be called after vdev removal. */
68 	bdev_virtio_remove_cb		remove_cb;
69 
70 	/** Context for the `remove_cb`. */
71 	void				*remove_ctx;
72 
73 	TAILQ_ENTRY(virtio_scsi_dev) tailq;
74 };
75 
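/*
 * Context for a single virtio-scsi request. It is stored in the bdev_io
 * driver context for regular I/O and embedded in the scan state for scan
 * requests. The unions overlap the data-command and task-management
 * request/response headers because each request is only ever one of the two.
 */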
76 struct virtio_scsi_io_ctx {
77 	struct iovec			iov_req;
78 	struct iovec			iov_resp;
79 	union {
80 		struct virtio_scsi_cmd_req req;
81 		struct virtio_scsi_ctrl_tmf_req tmf_req;
82 	};
83 	union {
84 		struct virtio_scsi_cmd_resp resp;
85 		struct virtio_scsi_ctrl_tmf_resp tmf_resp;
86 	};
87 };
88 
89 struct virtio_scsi_eventq_io {
90 	struct iovec			iov;
91 	struct virtio_scsi_event	ev;
92 };
93 
94 struct virtio_scsi_scan_info {
95 	uint64_t			num_blocks;
96 	uint32_t			block_size;
97 	uint8_t				target;
98 	bool				unmap_supported;
99 	TAILQ_ENTRY(virtio_scsi_scan_info) tailq;
100 };
101 
102 struct virtio_scsi_scan_base {
103 	struct virtio_scsi_dev		*svdev;
104 
105 	/** I/O channel used for the scan I/O. */
106 	struct bdev_virtio_io_channel	*channel;
107 
108 	bdev_virtio_create_cb		cb_fn;
109 	void				*cb_arg;
110 
111 	/** Scan all targets on the device. */
112 	bool				full_scan;
113 
114 	/** Start a full rescan after receiving the next scan I/O response. */
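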
115 	bool				restart;
116 
117 	/** Additional targets to be (re)scanned. */
118 	TAILQ_HEAD(, virtio_scsi_scan_info) scan_queue;
119 
120 	/** Remaining attempts for sending the current request. */
121 	unsigned                        retries;
122 
123 	/** If set, the last scan I/O needs to be resent */
124 	bool				needs_resend;
125 
126 	struct virtio_scsi_io_ctx	io_ctx;
127 	struct iovec			iov;
128 	uint8_t				payload[BDEV_VIRTIO_SCAN_PAYLOAD_SIZE];
129 
130 	/** Scan results for the current target. */
131 	struct virtio_scsi_scan_info	info;
132 };
133 
134 struct virtio_scsi_disk {
135 	struct spdk_bdev		bdev;
136 	struct virtio_scsi_dev		*svdev;
137 	struct virtio_scsi_scan_info	info;
138 
139 	/** Descriptor opened just to be notified of external bdev hotremove. */
140 	struct spdk_bdev_desc		*notify_desc;
141 
142 	/** Disk marked for removal. */
143 	bool				removed;
144 	TAILQ_ENTRY(virtio_scsi_disk)	link;
145 };
146 
147 struct bdev_virtio_io_channel {
148 	struct virtio_scsi_dev	*svdev;
149 
150 	/** Virtqueue exclusively assigned to this channel. */
151 	struct virtqueue	*vq;
152 
153 	/** Virtio response poller. */
154 	struct spdk_poller	*poller;
155 };
156 
157 static TAILQ_HEAD(, virtio_scsi_dev) g_virtio_scsi_devs =
158 	TAILQ_HEAD_INITIALIZER(g_virtio_scsi_devs);
159 
160 static pthread_mutex_t g_virtio_scsi_mutex = PTHREAD_MUTEX_INITIALIZER;
161 
162 /** Module finish in progress */
163 static bool g_bdev_virtio_finish = false;
164 
165 /* Features desired/implemented by this driver. */
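/*
 * VIRTIO_SCSI_F_INOUT - a single request may carry both device-readable and
 * device-writable data buffers.
 * VIRTIO_SCSI_F_HOTPLUG - the device reports target hot-attach/hot-detach
 * events on the eventq.
 * VIRTIO_RING_F_EVENT_IDX - enable the used/avail event index notification
 * suppression mechanism.
 * VHOST_USER_F_PROTOCOL_FEATURES - negotiate vhost-user protocol features
 * (relevant for the vhost-user transport only).
 */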
166 #define VIRTIO_SCSI_DEV_SUPPORTED_FEATURES		\
167 	(1ULL << VIRTIO_SCSI_F_INOUT		|	\
168 	 1ULL << VIRTIO_SCSI_F_HOTPLUG		|	\
169 	 1ULL << VIRTIO_RING_F_EVENT_IDX	|	\
170 	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)
171 
172 static void virtio_scsi_dev_unregister_cb(void *io_device);
173 static void virtio_scsi_dev_remove(struct virtio_scsi_dev *svdev,
174 				   bdev_virtio_remove_cb cb_fn, void *cb_arg);
175 static int bdev_virtio_scsi_ch_create_cb(void *io_device, void *ctx_buf);
176 static void bdev_virtio_scsi_ch_destroy_cb(void *io_device, void *ctx_buf);
177 static void process_scan_resp(struct virtio_scsi_scan_base *base);
178 static int bdev_virtio_mgmt_poll(void *arg);
179 
180 static int
181 virtio_scsi_dev_send_eventq_io(struct virtqueue *vq, struct virtio_scsi_eventq_io *io)
182 {
183 	int rc;
184 
185 	rc = virtqueue_req_start(vq, io, 1);
186 	if (rc != 0) {
187 		return -1;
188 	}
189 
190 	virtqueue_req_add_iovs(vq, &io->iov, 1, SPDK_VIRTIO_DESC_WR);
191 	virtqueue_req_flush(vq);
192 
193 	return 0;
194 }
195 
196 static int
197 virtio_scsi_dev_init(struct virtio_scsi_dev *svdev, uint16_t max_queues)
198 {
199 	struct virtio_dev *vdev = &svdev->vdev;
200 	struct spdk_ring *ctrlq_ring;
201 	struct virtio_scsi_eventq_io *eventq_io;
202 	struct virtqueue *eventq;
203 	uint16_t i, num_events;
204 	int rc;
205 
206 	rc = virtio_dev_reset(vdev, VIRTIO_SCSI_DEV_SUPPORTED_FEATURES);
207 	if (rc != 0) {
208 		return rc;
209 	}
210 
211 	rc = virtio_dev_start(vdev, max_queues, SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED);
212 	if (rc != 0) {
213 		return rc;
214 	}
215 
216 	ctrlq_ring = spdk_ring_create(SPDK_RING_TYPE_MP_SC, CTRLQ_RING_SIZE,
217 				      SPDK_ENV_SOCKET_ID_ANY);
218 	if (ctrlq_ring == NULL) {
219 		SPDK_ERRLOG("Failed to allocate send ring for the controlq.\n");
220 		return -1;
221 	}
222 
223 	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_CONTROLQ);
224 	if (rc != 0) {
225 		SPDK_ERRLOG("Failed to acquire the controlq.\n");
226 		spdk_ring_free(ctrlq_ring);
227 		return -1;
228 	}
229 
230 	rc = virtio_dev_acquire_queue(vdev, VIRTIO_SCSI_EVENTQ);
231 	if (rc != 0) {
232 		SPDK_ERRLOG("Failed to acquire the eventq.\n");
233 		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
234 		spdk_ring_free(ctrlq_ring);
235 		return -1;
236 	}
237 
238 	eventq = vdev->vqs[VIRTIO_SCSI_EVENTQ];
239 	num_events = spdk_min(eventq->vq_nentries, VIRTIO_SCSI_EVENTQ_BUFFER_COUNT);
240 	svdev->eventq_ios = spdk_zmalloc(sizeof(*svdev->eventq_ios) * num_events,
241 					 0, NULL, SPDK_ENV_LCORE_ID_ANY,
242 					 SPDK_MALLOC_DMA);
243 	if (svdev->eventq_ios == NULL) {
244 		SPDK_ERRLOG("cannot allocate memory for %"PRIu16" eventq buffers\n",
245 			    num_events);
246 		virtio_dev_release_queue(vdev, VIRTIO_SCSI_EVENTQ);
247 		virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
248 		spdk_ring_free(ctrlq_ring);
249 		return -1;
250 	}
251 
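	/* Pre-post all eventq buffers so the device always has somewhere to
	 * report hotplug and other asynchronous events.
	 */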
252 	for (i = 0; i < num_events; i++) {
253 		eventq_io = &svdev->eventq_ios[i];
254 		eventq_io->iov.iov_base = &eventq_io->ev;
255 		eventq_io->iov.iov_len = sizeof(eventq_io->ev);
256 		virtio_scsi_dev_send_eventq_io(eventq, eventq_io);
257 	}
258 
259 	svdev->ctrlq_ring = ctrlq_ring;
260 
261 	svdev->mgmt_poller = SPDK_POLLER_REGISTER(bdev_virtio_mgmt_poll, svdev,
262 			     MGMT_POLL_PERIOD_US);
263 
264 	TAILQ_INIT(&svdev->luns);
265 	svdev->scan_ctx = NULL;
266 	svdev->removed = false;
267 	svdev->remove_cb = NULL;
268 	svdev->remove_ctx = NULL;
269 
270 	spdk_io_device_register(svdev, bdev_virtio_scsi_ch_create_cb,
271 				bdev_virtio_scsi_ch_destroy_cb,
272 				sizeof(struct bdev_virtio_io_channel),
273 				svdev->vdev.name);
274 
275 	pthread_mutex_lock(&g_virtio_scsi_mutex);
276 	TAILQ_INSERT_TAIL(&g_virtio_scsi_devs, svdev, tailq);
277 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
278 	return 0;
279 }
280 
281 static struct virtio_scsi_dev *
282 virtio_pci_scsi_dev_create(const char *name, struct virtio_pci_ctx *pci_ctx)
283 {
284 	static int pci_dev_counter = 0;
285 	struct virtio_scsi_dev *svdev;
286 	struct virtio_dev *vdev;
287 	char *default_name = NULL;
288 	uint32_t num_queues;
289 	int rc;
290 
291 	svdev = calloc(1, sizeof(*svdev));
292 	if (svdev == NULL) {
293 		SPDK_ERRLOG("virtio device calloc failed\n");
294 		return NULL;
295 	}
296 
297 	vdev = &svdev->vdev;
298 	if (name == NULL) {
299 		default_name = spdk_sprintf_alloc("VirtioScsi%"PRIu32, pci_dev_counter++);
300 		if (default_name == NULL) {
301 			free(svdev);
302 			return NULL;
303 		}
304 		name = default_name;
305 	}
306 
307 	rc = virtio_pci_dev_init(vdev, name, pci_ctx);
308 	free(default_name);
309 
310 	if (rc != 0) {
311 		free(svdev);
312 		return NULL;
313 	}
314 
315 	rc = virtio_dev_read_dev_config(vdev, offsetof(struct virtio_scsi_config, num_queues),
316 					&num_queues, sizeof(num_queues));
317 	if (rc) {
318 		SPDK_ERRLOG("%s: config read failed: %s\n", vdev->name, spdk_strerror(-rc));
319 		goto fail;
320 	}
321 
322 	rc = virtio_scsi_dev_init(svdev, num_queues);
323 	if (rc != 0) {
324 		goto fail;
325 	}
326 
327 	return svdev;
328 
329 fail:
330 	vdev->ctx = NULL;
331 	virtio_dev_destruct(vdev);
332 	free(svdev);
333 	return NULL;
334 }
335 
336 static struct virtio_scsi_dev *
337 virtio_user_scsi_dev_create(const char *name, const char *path,
338 			    uint16_t num_queues, uint32_t queue_size)
339 {
340 	struct virtio_scsi_dev *svdev;
341 	struct virtio_dev *vdev;
342 	int rc;
343 
344 	svdev = calloc(1, sizeof(*svdev));
345 	if (svdev == NULL) {
346 		SPDK_ERRLOG("calloc failed for virtio device %s: %s\n", name, path);
347 		return NULL;
348 	}
349 
350 	vdev = &svdev->vdev;
351 	rc = virtio_user_dev_init(vdev, name, path, queue_size);
352 	if (rc != 0) {
353 		SPDK_ERRLOG("Failed to create virtio device %s: %s\n", name, path);
354 		free(svdev);
355 		return NULL;
356 	}
357 
358 	rc = virtio_scsi_dev_init(svdev, num_queues);
359 	if (rc != 0) {
360 		virtio_dev_destruct(vdev);
361 		free(svdev);
362 		return NULL;
363 	}
364 
365 	return svdev;
366 }
367 
368 static struct virtio_scsi_disk *
369 virtio_scsi_dev_get_disk_by_id(struct virtio_scsi_dev *svdev, uint8_t target_id)
370 {
371 	struct virtio_scsi_disk *disk;
372 
373 	TAILQ_FOREACH(disk, &svdev->luns, link) {
374 		if (disk->info.target == target_id) {
375 			return disk;
376 		}
377 	}
378 
379 	return NULL;
380 }
381 
382 static int virtio_scsi_dev_scan(struct virtio_scsi_dev *svdev,
383 				bdev_virtio_create_cb cb_fn, void *cb_arg);
384 static int send_scan_io(struct virtio_scsi_scan_base *base);
385 static void _virtio_scsi_dev_scan_tgt(struct virtio_scsi_scan_base *base, uint8_t target);
386 static int _virtio_scsi_dev_scan_next(struct virtio_scsi_scan_base *base, int rc);
387 static void _virtio_scsi_dev_scan_finish(struct virtio_scsi_scan_base *base, int errnum);
388 static int virtio_scsi_dev_scan_tgt(struct virtio_scsi_dev *svdev, uint8_t target);
389 
390 static int
391 bdev_virtio_get_ctx_size(void)
392 {
393 	return sizeof(struct virtio_scsi_io_ctx);
394 }
395 
396 static int
397 bdev_virtio_scsi_config_json(struct spdk_json_write_ctx *w)
398 {
399 	struct virtio_scsi_dev *svdev;
400 
401 	pthread_mutex_lock(&g_virtio_scsi_mutex);
402 	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
403 		spdk_json_write_object_begin(w);
404 
405 		spdk_json_write_named_string(w, "method", "bdev_virtio_attach_controller");
406 
407 		spdk_json_write_named_object_begin(w, "params");
408 		spdk_json_write_named_string(w, "name", svdev->vdev.name);
409 		spdk_json_write_named_string(w, "dev_type", "scsi");
410 
411 		/* Write transport specific parameters. */
412 		svdev->vdev.backend_ops->write_json_config(&svdev->vdev, w);
413 
414 		spdk_json_write_object_end(w);
415 
416 		spdk_json_write_object_end(w);
417 
418 	}
419 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
420 
421 	return 0;
422 }
423 
424 
425 static struct spdk_bdev_module virtio_scsi_if = {
426 	.name = "virtio_scsi",
427 	.module_init = bdev_virtio_initialize,
428 	.module_fini = bdev_virtio_finish,
429 	.get_ctx_size = bdev_virtio_get_ctx_size,
430 	.config_json = bdev_virtio_scsi_config_json,
431 	.async_fini = true,
432 };
433 
434 SPDK_BDEV_MODULE_REGISTER(virtio_scsi, &virtio_scsi_if)
435 
436 static struct virtio_scsi_io_ctx *
437 bdev_virtio_init_io_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
438 {
439 	struct virtio_scsi_cmd_req *req;
440 	struct virtio_scsi_cmd_resp *resp;
441 	struct virtio_scsi_disk *disk = (struct virtio_scsi_disk *)bdev_io->bdev;
442 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
443 
444 	req = &io_ctx->req;
445 	resp = &io_ctx->resp;
446 
447 	io_ctx->iov_req.iov_base = req;
448 	io_ctx->iov_req.iov_len = sizeof(*req);
449 
450 	io_ctx->iov_resp.iov_base = resp;
451 	io_ctx->iov_resp.iov_len = sizeof(*resp);
452 
453 	memset(req, 0, sizeof(*req));
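	/* virtio-scsi LUN addressing: byte 0 is always 1, byte 1 selects the
	 * SCSI target, and the remaining bytes (zeroed above) address LUN 0.
	 */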
454 	req->lun[0] = 1;
455 	req->lun[1] = disk->info.target;
456 
457 	return io_ctx;
458 }
459 
460 static struct virtio_scsi_io_ctx *
461 bdev_virtio_init_tmf_vreq(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
462 {
463 	struct virtio_scsi_ctrl_tmf_req *tmf_req;
464 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
465 	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
466 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
467 
468 	tmf_req = &io_ctx->tmf_req;
469 	tmf_resp = &io_ctx->tmf_resp;
470 
471 	io_ctx->iov_req.iov_base = tmf_req;
472 	io_ctx->iov_req.iov_len = sizeof(*tmf_req);
473 	io_ctx->iov_resp.iov_base = tmf_resp;
474 	io_ctx->iov_resp.iov_len = sizeof(*tmf_resp);
475 
476 	memset(tmf_req, 0, sizeof(*tmf_req));
477 	tmf_req->lun[0] = 1;
478 	tmf_req->lun[1] = disk->info.target;
479 
480 	return io_ctx;
481 }
482 
483 static void
484 bdev_virtio_send_io(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
485 {
486 	struct bdev_virtio_io_channel *virtio_channel = spdk_io_channel_get_ctx(ch);
487 	struct virtqueue *vq = virtio_channel->vq;
488 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
489 	int rc;
490 
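	/* Reserve one descriptor for the request header, one for the response,
	 * plus one per data iovec.
	 */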
491 	rc = virtqueue_req_start(vq, bdev_io, bdev_io->u.bdev.iovcnt + 2);
492 	if (rc == -ENOMEM) {
493 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
494 		return;
495 	} else if (rc != 0) {
496 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
497 		return;
498 	}
499 
500 	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
501 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
502 		virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
503 		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
504 				       SPDK_VIRTIO_DESC_WR);
505 	} else {
506 		virtqueue_req_add_iovs(vq, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
507 				       SPDK_VIRTIO_DESC_RO);
508 		virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
509 	}
510 
511 	virtqueue_req_flush(vq);
512 }
513 
514 static void
515 bdev_virtio_rw(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
516 {
517 	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
518 	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_io_vreq(ch, bdev_io);
519 	struct virtio_scsi_cmd_req *req = &io_ctx->req;
520 	bool is_write = bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE;
521 
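	/* Targets with more than 2^32 blocks cannot be fully addressed by the
	 * 32-bit LBA of READ(10)/WRITE(10), so use the 16-byte CDB variants.
	 */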
522 	if (disk->info.num_blocks > (1ULL << 32)) {
523 		req->cdb[0] = is_write ? SPDK_SBC_WRITE_16 : SPDK_SBC_READ_16;
524 		to_be64(&req->cdb[2], bdev_io->u.bdev.offset_blocks);
525 		to_be32(&req->cdb[10], bdev_io->u.bdev.num_blocks);
526 	} else {
527 		req->cdb[0] = is_write ? SPDK_SBC_WRITE_10 : SPDK_SBC_READ_10;
528 		to_be32(&req->cdb[2], bdev_io->u.bdev.offset_blocks);
529 		to_be16(&req->cdb[7], bdev_io->u.bdev.num_blocks);
530 	}
531 
532 	bdev_virtio_send_io(ch, bdev_io);
533 }
534 
535 static void
536 bdev_virtio_reset(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
537 {
538 	struct bdev_virtio_io_channel *virtio_ch = spdk_io_channel_get_ctx(ch);
539 	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_tmf_vreq(ch, bdev_io);
540 	struct virtio_scsi_ctrl_tmf_req *tmf_req = &io_ctx->tmf_req;
541 	struct virtio_scsi_dev *svdev = virtio_ch->svdev;
542 	size_t enqueued_count;
543 
544 	tmf_req->type = VIRTIO_SCSI_T_TMF;
545 	tmf_req->subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;
546 
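	/* TMF requests are queued on a ring and submitted to the controlq by
	 * the management poller, which runs on the thread that owns the controlq.
	 */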
547 	enqueued_count = spdk_ring_enqueue(svdev->ctrlq_ring, (void **)&bdev_io, 1, NULL);
548 	if (spdk_likely(enqueued_count == 1)) {
549 		return;
550 	} else {
551 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
552 	}
553 }
554 
555 static void
556 bdev_virtio_unmap(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
557 {
558 	struct virtio_scsi_io_ctx *io_ctx = bdev_virtio_init_io_vreq(ch, bdev_io);
559 	struct virtio_scsi_cmd_req *req = &io_ctx->req;
560 	struct spdk_scsi_unmap_bdesc *desc, *first_desc;
561 	uint8_t *buf;
562 	uint64_t offset_blocks, num_blocks;
563 	uint16_t cmd_len;
564 
565 	if (!success) {
566 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
567 		return;
568 	}
569 
570 	buf = bdev_io->u.bdev.iovs[0].iov_base;
571 
572 	offset_blocks = bdev_io->u.bdev.offset_blocks;
573 	num_blocks = bdev_io->u.bdev.num_blocks;
574 
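	/* Build the UNMAP parameter list in place: an 8-byte header followed by
	 * 16-byte block descriptors. Each descriptor can cover at most
	 * UINT32_MAX blocks, so larger requests span multiple descriptors.
	 */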
575 	/* (n-1) * 16-byte descriptors */
576 	first_desc = desc = (struct spdk_scsi_unmap_bdesc *)&buf[8];
577 	while (num_blocks > UINT32_MAX) {
578 		to_be64(&desc->lba, offset_blocks);
579 		to_be32(&desc->block_count, UINT32_MAX);
580 		memset(&desc->reserved, 0, sizeof(desc->reserved));
581 		offset_blocks += UINT32_MAX;
582 		num_blocks -= UINT32_MAX;
583 		desc++;
584 	}
585 
586 	/* The last descriptor with block_count <= UINT32_MAX */
587 	to_be64(&desc->lba, offset_blocks);
588 	to_be32(&desc->block_count, num_blocks);
589 	memset(&desc->reserved, 0, sizeof(desc->reserved));
590 
591 	/* 8-byte header + n * 16-byte block descriptor */
592 	cmd_len = 8 + (desc - first_desc + 1) * sizeof(struct spdk_scsi_unmap_bdesc);
593 
594 	req->cdb[0] = SPDK_SBC_UNMAP;
595 	to_be16(&req->cdb[7], cmd_len);
596 
597 	/* 8-byte header */
598 	to_be16(&buf[0], cmd_len - 2); /* total length (excluding the length field) */
599 	to_be16(&buf[2], cmd_len - 8); /* length of block descriptors */
600 	memset(&buf[4], 0, 4); /* reserved */
601 
602 	bdev_virtio_send_io(ch, bdev_io);
603 }
604 
605 static void
606 bdev_virtio_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
607 		       bool success)
608 {
609 	if (!success) {
610 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
611 		return;
612 	}
613 
614 	bdev_virtio_rw(ch, bdev_io);
615 }
616 
617 static int _bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
618 {
619 	struct virtio_scsi_disk *disk = SPDK_CONTAINEROF(bdev_io->bdev, struct virtio_scsi_disk, bdev);
620 
621 	switch (bdev_io->type) {
622 	case SPDK_BDEV_IO_TYPE_READ:
623 		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_get_buf_cb,
624 				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
625 		return 0;
626 	case SPDK_BDEV_IO_TYPE_WRITE:
627 		bdev_virtio_rw(ch, bdev_io);
628 		return 0;
629 	case SPDK_BDEV_IO_TYPE_RESET:
630 		bdev_virtio_reset(ch, bdev_io);
631 		return 0;
632 	case SPDK_BDEV_IO_TYPE_UNMAP: {
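		/* One 16-byte block descriptor per UINT32_MAX blocks (rounded up),
		 * plus the 8-byte UNMAP parameter list header.
		 */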
633 		uint64_t buf_len = 8 /* header size */ +
634 				   (bdev_io->u.bdev.num_blocks + UINT32_MAX - 1) /
635 				   UINT32_MAX * sizeof(struct spdk_scsi_unmap_bdesc);
636 
637 		if (!disk->info.unmap_supported) {
638 			return -1;
639 		}
640 
641 		if (buf_len > SPDK_BDEV_LARGE_BUF_MAX_SIZE) {
642 			SPDK_ERRLOG("Trying to UNMAP too many blocks: %"PRIu64"\n",
643 				    bdev_io->u.bdev.num_blocks);
644 			return -1;
645 		}
646 		spdk_bdev_io_get_buf(bdev_io, bdev_virtio_unmap, buf_len);
647 		return 0;
648 	}
649 	case SPDK_BDEV_IO_TYPE_FLUSH:
650 	default:
651 		return -1;
652 	}
653 	return 0;
654 }
655 
656 static void bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
657 {
658 	if (_bdev_virtio_submit_request(ch, bdev_io) < 0) {
659 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
660 	}
661 }
662 
663 static bool
664 bdev_virtio_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
665 {
666 	struct virtio_scsi_disk *disk = ctx;
667 
668 	switch (io_type) {
669 	case SPDK_BDEV_IO_TYPE_READ:
670 	case SPDK_BDEV_IO_TYPE_WRITE:
671 	case SPDK_BDEV_IO_TYPE_FLUSH:
672 	case SPDK_BDEV_IO_TYPE_RESET:
673 		return true;
674 
675 	case SPDK_BDEV_IO_TYPE_UNMAP:
676 		return disk->info.unmap_supported;
677 
678 	default:
679 		return false;
680 	}
681 }
682 
683 static struct spdk_io_channel *
684 bdev_virtio_get_io_channel(void *ctx)
685 {
686 	struct virtio_scsi_disk *disk = ctx;
687 
688 	return spdk_get_io_channel(disk->svdev);
689 }
690 
691 static int
692 bdev_virtio_disk_destruct(void *ctx)
693 {
694 	struct virtio_scsi_disk *disk = ctx;
695 	struct virtio_scsi_dev *svdev = disk->svdev;
696 
697 	TAILQ_REMOVE(&svdev->luns, disk, link);
698 	free(disk->bdev.name);
699 	free(disk);
700 
701 	if (svdev->removed && TAILQ_EMPTY(&svdev->luns)) {
702 		spdk_io_device_unregister(svdev, virtio_scsi_dev_unregister_cb);
703 	}
704 
705 	return 0;
706 }
707 
708 static int
709 bdev_virtio_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
710 {
711 	struct virtio_scsi_disk *disk = ctx;
712 
713 	virtio_dev_dump_json_info(&disk->svdev->vdev, w);
714 	return 0;
715 }
716 
717 static void
718 bdev_virtio_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
719 {
720 	/* SCSI targets and LUNs are discovered during the scan process, so there
721 	 * is nothing to save here.
722 	 */
723 }
724 
725 static const struct spdk_bdev_fn_table virtio_fn_table = {
726 	.destruct		= bdev_virtio_disk_destruct,
727 	.submit_request		= bdev_virtio_submit_request,
728 	.io_type_supported	= bdev_virtio_io_type_supported,
729 	.get_io_channel		= bdev_virtio_get_io_channel,
730 	.dump_info_json		= bdev_virtio_dump_info_json,
731 	.write_config_json	= bdev_virtio_write_config_json,
732 };
733 
734 static void
735 get_scsi_status(struct virtio_scsi_cmd_resp *resp, int *sk, int *asc, int *ascq)
736 {
737 	/* see spdk_scsi_task_build_sense_data() for sense data details */
738 	*sk = 0;
739 	*asc = 0;
740 	*ascq = 0;
741 
742 	if (resp->sense_len < 3) {
743 		return;
744 	}
745 
746 	*sk = resp->sense[2] & 0xf;
747 
748 	if (resp->sense_len < 13) {
749 		return;
750 	}
751 
752 	*asc = resp->sense[12];
753 
754 	if (resp->sense_len < 14) {
755 		return;
756 	}
757 
758 	*ascq = resp->sense[13];
759 }
760 
761 static void
762 bdev_virtio_io_cpl(struct spdk_bdev_io *bdev_io)
763 {
764 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
765 	int sk, asc, ascq;
766 
767 	get_scsi_status(&io_ctx->resp, &sk, &asc, &ascq);
768 	spdk_bdev_io_complete_scsi_status(bdev_io, io_ctx->resp.status, sk, asc, ascq);
769 }
770 
771 static int
772 bdev_virtio_poll(void *arg)
773 {
774 	struct bdev_virtio_io_channel *ch = arg;
775 	struct virtio_scsi_dev *svdev = ch->svdev;
776 	struct virtio_scsi_scan_base *scan_ctx = svdev->scan_ctx;
777 	void *io[32];
778 	uint32_t io_len[32];
779 	uint16_t i, cnt;
780 	int rc;
781 
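	/* Completions for the in-flight scan request share the request virtqueue
	 * with regular I/O, so they are intercepted here and routed to the scan
	 * state machine instead of the bdev layer.
	 */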
782 	cnt = virtio_recv_pkts(ch->vq, (void **)io, io_len, SPDK_COUNTOF(io));
783 	for (i = 0; i < cnt; ++i) {
784 		if (spdk_unlikely(scan_ctx && io[i] == &scan_ctx->io_ctx)) {
785 			if (svdev->removed) {
786 				_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
787 				return SPDK_POLLER_BUSY;
788 			}
789 
790 			if (scan_ctx->restart) {
791 				scan_ctx->restart = false;
792 				scan_ctx->full_scan = true;
793 				_virtio_scsi_dev_scan_tgt(scan_ctx, 0);
794 				continue;
795 			}
796 
797 			process_scan_resp(scan_ctx);
798 			continue;
799 		}
800 
801 		bdev_virtio_io_cpl(io[i]);
802 	}
803 
804 	if (spdk_unlikely(scan_ctx && scan_ctx->needs_resend)) {
805 		if (svdev->removed) {
806 			_virtio_scsi_dev_scan_finish(scan_ctx, -EINTR);
807 			return SPDK_POLLER_BUSY;
808 		} else if (cnt == 0) {
809 			return SPDK_POLLER_IDLE;
810 		}
811 
812 		rc = send_scan_io(scan_ctx);
813 		if (rc != 0) {
814 			assert(scan_ctx->retries > 0);
815 			scan_ctx->retries--;
816 			if (scan_ctx->retries == 0) {
817 				SPDK_ERRLOG("Target scan failed unrecoverably with rc = %d.\n", rc);
818 				_virtio_scsi_dev_scan_finish(scan_ctx, rc);
819 			}
820 		}
821 	}
822 
823 	return cnt;
824 }
825 
826 static void
827 bdev_virtio_tmf_cpl_cb(void *ctx)
828 {
829 	struct spdk_bdev_io *bdev_io = ctx;
830 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
831 
832 	if (io_ctx->tmf_resp.response == VIRTIO_SCSI_S_OK) {
833 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
834 	} else {
835 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
836 	}
837 }
838 
839 static void
840 bdev_virtio_tmf_cpl(struct spdk_bdev_io *bdev_io)
841 {
842 	spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io), bdev_virtio_tmf_cpl_cb, bdev_io);
843 }
844 
845 static void
846 bdev_virtio_eventq_io_cpl(struct virtio_scsi_dev *svdev, struct virtio_scsi_eventq_io *io)
847 {
848 	struct virtio_scsi_event *ev = &io->ev;
849 	struct virtio_scsi_disk *disk;
850 
851 	if (ev->lun[0] != 1) {
852 		SPDK_WARNLOG("Received an event with invalid data layout.\n");
853 		goto out;
854 	}
855 
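	/* If the device dropped events because we ran out of eventq buffers,
	 * fall back to a full rescan to resynchronize the target list.
	 */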
856 	if (ev->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
857 		ev->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
858 		virtio_scsi_dev_scan(svdev, NULL, NULL);
859 	}
860 
861 	switch (ev->event) {
862 	case VIRTIO_SCSI_T_NO_EVENT:
863 		break;
864 	case VIRTIO_SCSI_T_TRANSPORT_RESET:
865 		switch (ev->reason) {
866 		case VIRTIO_SCSI_EVT_RESET_RESCAN:
867 			virtio_scsi_dev_scan_tgt(svdev, ev->lun[1]);
868 			break;
869 		case VIRTIO_SCSI_EVT_RESET_REMOVED:
870 			disk = virtio_scsi_dev_get_disk_by_id(svdev, ev->lun[1]);
871 			if (disk != NULL) {
872 				spdk_bdev_unregister(&disk->bdev, NULL, NULL);
873 			}
874 			break;
875 		default:
876 			break;
877 		}
878 		break;
879 	default:
880 		break;
881 	}
882 
883 out:
884 	virtio_scsi_dev_send_eventq_io(svdev->vdev.vqs[VIRTIO_SCSI_EVENTQ], io);
885 }
886 
887 static void
888 bdev_virtio_tmf_abort_nomem_cb(void *ctx)
889 {
890 	struct spdk_bdev_io *bdev_io = ctx;
891 
892 	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
893 }
894 
895 static void
896 bdev_virtio_tmf_abort_ioerr_cb(void *ctx)
897 {
898 	struct spdk_bdev_io *bdev_io = ctx;
899 
900 	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
901 }
902 
903 static void
904 bdev_virtio_tmf_abort(struct spdk_bdev_io *bdev_io, int status)
905 {
906 	spdk_msg_fn fn;
907 
908 	if (status == -ENOMEM) {
909 		fn = bdev_virtio_tmf_abort_nomem_cb;
910 	} else {
911 		fn = bdev_virtio_tmf_abort_ioerr_cb;
912 	}
913 
914 	spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io), fn, bdev_io);
915 }
916 
917 static int
918 bdev_virtio_send_tmf_io(struct virtqueue *ctrlq, struct spdk_bdev_io *bdev_io)
919 {
920 	struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
921 	int rc;
922 
923 	rc = virtqueue_req_start(ctrlq, bdev_io, 2);
924 	if (rc != 0) {
925 		return rc;
926 	}
927 
928 	virtqueue_req_add_iovs(ctrlq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
929 	virtqueue_req_add_iovs(ctrlq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
930 
931 	virtqueue_req_flush(ctrlq);
932 	return 0;
933 }
934 
935 static int
936 bdev_virtio_mgmt_poll(void *arg)
937 {
938 	struct virtio_scsi_dev *svdev = arg;
939 	struct virtio_dev *vdev = &svdev->vdev;
940 	struct virtqueue *eventq = vdev->vqs[VIRTIO_SCSI_EVENTQ];
941 	struct virtqueue *ctrlq = vdev->vqs[VIRTIO_SCSI_CONTROLQ];
942 	struct spdk_ring *send_ring = svdev->ctrlq_ring;
943 	void *io[16];
944 	uint32_t io_len[16];
945 	uint16_t i, cnt;
946 	int rc;
947 	int total = 0;
948 
949 	cnt = spdk_ring_dequeue(send_ring, io, SPDK_COUNTOF(io));
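	/* Submit any TMF requests queued by bdev_virtio_reset(), then reap
	 * completions from the controlq and events from the eventq.
	 */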
950 	total += cnt;
951 	for (i = 0; i < cnt; ++i) {
952 		rc = bdev_virtio_send_tmf_io(ctrlq, io[i]);
953 		if (rc != 0) {
954 			bdev_virtio_tmf_abort(io[i], rc);
955 		}
956 	}
957 
958 	cnt = virtio_recv_pkts(ctrlq, io, io_len, SPDK_COUNTOF(io));
959 	total += cnt;
960 	for (i = 0; i < cnt; ++i) {
961 		bdev_virtio_tmf_cpl(io[i]);
962 	}
963 
964 	cnt = virtio_recv_pkts(eventq, io, io_len, SPDK_COUNTOF(io));
965 	total += cnt;
966 	for (i = 0; i < cnt; ++i) {
967 		bdev_virtio_eventq_io_cpl(svdev, io[i]);
968 	}
969 
970 	return total;
971 }
972 
973 static int
974 bdev_virtio_scsi_ch_create_cb(void *io_device, void *ctx_buf)
975 {
976 	struct virtio_scsi_dev *svdev = io_device;
977 	struct virtio_dev *vdev = &svdev->vdev;
978 	struct bdev_virtio_io_channel *ch = ctx_buf;
979 	struct virtqueue *vq;
980 	int32_t queue_idx;
981 
982 	queue_idx = virtio_dev_find_and_acquire_queue(vdev, VIRTIO_SCSI_REQUESTQ);
983 	if (queue_idx < 0) {
984 		SPDK_ERRLOG("Couldn't get an unused queue for the io_channel.\n");
985 		return -1;
986 	}
987 
988 	vq = vdev->vqs[queue_idx];
989 
990 	ch->svdev = svdev;
991 	ch->vq = vq;
992 
993 	ch->poller = SPDK_POLLER_REGISTER(bdev_virtio_poll, ch, 0);
994 
995 	return 0;
996 }
997 
998 static void
999 bdev_virtio_scsi_ch_destroy_cb(void *io_device, void *ctx_buf)
1000 {
1001 	struct bdev_virtio_io_channel *ch = ctx_buf;
1002 	struct virtio_scsi_dev *svdev = ch->svdev;
1003 	struct virtio_dev *vdev = &svdev->vdev;
1004 	struct virtqueue *vq = ch->vq;
1005 
1006 	spdk_poller_unregister(&ch->poller);
1007 	virtio_dev_release_queue(vdev, vq->vq_queue_index);
1008 }
1009 
1010 static void
1011 _virtio_scsi_dev_scan_finish(struct virtio_scsi_scan_base *base, int errnum)
1012 {
1013 	struct virtio_scsi_dev *svdev = base->svdev;
1014 	size_t bdevs_cnt;
1015 	struct spdk_bdev *bdevs[BDEV_VIRTIO_MAX_TARGET];
1016 	struct virtio_scsi_disk *disk;
1017 	struct virtio_scsi_scan_info *tgt, *next_tgt;
1018 
1019 	spdk_put_io_channel(spdk_io_channel_from_ctx(base->channel));
1020 	base->svdev->scan_ctx = NULL;
1021 
1022 	TAILQ_FOREACH_SAFE(tgt, &base->scan_queue, tailq, next_tgt) {
1023 		TAILQ_REMOVE(&base->scan_queue, tgt, tailq);
1024 		free(tgt);
1025 	}
1026 
1027 	if (base->cb_fn == NULL) {
1028 		spdk_free(base);
1029 		return;
1030 	}
1031 
1032 	bdevs_cnt = 0;
1033 	if (errnum == 0) {
1034 		TAILQ_FOREACH(disk, &svdev->luns, link) {
1035 			bdevs[bdevs_cnt] = &disk->bdev;
1036 			bdevs_cnt++;
1037 		}
1038 	}
1039 
1040 	base->cb_fn(base->cb_arg, errnum, bdevs, bdevs_cnt);
1041 	spdk_free(base);
1042 }
1043 
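/*
 * Scan flow for a single target: INQUIRY (standard), TEST UNIT READY
 * (optionally followed by START STOP UNIT to spin the unit up), INQUIRY VPD
 * "Supported VPD Pages" (optionally followed by the "Block Thin Provisioning"
 * page to detect UNMAP support), and finally READ CAPACITY (10), escalating
 * to READ CAPACITY (16) when READ CAPACITY (10) reports 0xFFFFFFFF.
 * Responses are dispatched in process_scan_resp() based on the opcode of the
 * CDB that was sent.
 */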
1044 static int
1045 send_scan_io(struct virtio_scsi_scan_base *base)
1046 {
1047 	struct virtio_scsi_io_ctx *io_ctx = &base->io_ctx;
1048 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1049 	struct virtqueue *vq = base->channel->vq;
1050 	int payload_iov_cnt = base->iov.iov_len > 0 ? 1 : 0;
1051 	int rc;
1052 
1053 	req->lun[0] = 1;
1054 	req->lun[1] = base->info.target;
1055 
1056 	rc = virtqueue_req_start(vq, io_ctx, 2 + payload_iov_cnt);
1057 	if (rc != 0) {
1058 		base->needs_resend = true;
1059 		return -1;
1060 	}
1061 
1062 	virtqueue_req_add_iovs(vq, &io_ctx->iov_req, 1, SPDK_VIRTIO_DESC_RO);
1063 	virtqueue_req_add_iovs(vq, &io_ctx->iov_resp, 1, SPDK_VIRTIO_DESC_WR);
1064 	virtqueue_req_add_iovs(vq, &base->iov, payload_iov_cnt, SPDK_VIRTIO_DESC_WR);
1065 
1066 	virtqueue_req_flush(vq);
1067 	return 0;
1068 }
1069 
1070 static int
1071 send_inquiry(struct virtio_scsi_scan_base *base)
1072 {
1073 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1074 	struct spdk_scsi_cdb_inquiry *cdb;
1075 
1076 	memset(req, 0, sizeof(*req));
1077 
1078 	base->iov.iov_len = BDEV_VIRTIO_SCAN_PAYLOAD_SIZE;
1079 	cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;
1080 	cdb->opcode = SPDK_SPC_INQUIRY;
1081 	to_be16(cdb->alloc_len, BDEV_VIRTIO_SCAN_PAYLOAD_SIZE);
1082 
1083 	return send_scan_io(base);
1084 }
1085 
1086 static int
1087 send_inquiry_vpd(struct virtio_scsi_scan_base *base, uint8_t page_code)
1088 {
1089 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1090 	struct spdk_scsi_cdb_inquiry *inquiry_cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;
1091 
1092 	memset(req, 0, sizeof(*req));
1093 
1094 	base->iov.iov_len = BDEV_VIRTIO_SCAN_PAYLOAD_SIZE;
1095 	inquiry_cdb->opcode = SPDK_SPC_INQUIRY;
1096 	inquiry_cdb->evpd = 1;
1097 	inquiry_cdb->page_code = page_code;
1098 	to_be16(inquiry_cdb->alloc_len, base->iov.iov_len);
1099 
1100 	return send_scan_io(base);
1101 }
1102 
1103 static int
1104 send_read_cap_10(struct virtio_scsi_scan_base *base)
1105 {
1106 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1107 
1108 	memset(req, 0, sizeof(*req));
1109 
1110 	base->iov.iov_len = 8;
1111 	req->cdb[0] = SPDK_SBC_READ_CAPACITY_10;
1112 
1113 	return send_scan_io(base);
1114 }
1115 
1116 static int
1117 send_read_cap_16(struct virtio_scsi_scan_base *base)
1118 {
1119 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1120 
1121 	memset(req, 0, sizeof(*req));
1122 
1123 	base->iov.iov_len = 32;
1124 	req->cdb[0] = SPDK_SPC_SERVICE_ACTION_IN_16;
1125 	req->cdb[1] = SPDK_SBC_SAI_READ_CAPACITY_16;
1126 	to_be32(&req->cdb[10], base->iov.iov_len);
1127 
1128 	return send_scan_io(base);
1129 }
1130 
1131 static int
1132 send_test_unit_ready(struct virtio_scsi_scan_base *base)
1133 {
1134 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1135 
1136 	memset(req, 0, sizeof(*req));
1137 	req->cdb[0] = SPDK_SPC_TEST_UNIT_READY;
1138 	base->iov.iov_len = 0;
1139 
1140 	return send_scan_io(base);
1141 }
1142 
1143 static int
1144 send_start_stop_unit(struct virtio_scsi_scan_base *base)
1145 {
1146 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1147 
1148 	memset(req, 0, sizeof(*req));
1149 	req->cdb[0] = SPDK_SBC_START_STOP_UNIT;
1150 	req->cdb[4] = SPDK_SBC_START_STOP_UNIT_START_BIT;
1151 	base->iov.iov_len = 0;
1152 
1153 	return send_scan_io(base);
1154 }
1155 
1156 static int
1157 process_scan_start_stop_unit(struct virtio_scsi_scan_base *base)
1158 {
1159 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1160 
1161 	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
1162 		return send_inquiry_vpd(base, SPDK_SPC_VPD_SUPPORTED_VPD_PAGES);
1163 	}
1164 
1165 	return -1;
1166 }
1167 
1168 static int
1169 process_scan_test_unit_ready(struct virtio_scsi_scan_base *base)
1170 {
1171 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1172 	int sk, asc, ascq;
1173 
1174 	get_scsi_status(resp, &sk, &asc, &ascq);
1175 
1176 	/* Check the response: fetch VPD pages if the unit is spun up, otherwise send START STOP UNIT. */
1177 	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
1178 		return send_inquiry_vpd(base, SPDK_SPC_VPD_SUPPORTED_VPD_PAGES);
1179 	} else if (resp->response == VIRTIO_SCSI_S_OK &&
1180 		   resp->status == SPDK_SCSI_STATUS_CHECK_CONDITION &&
1181 		   sk == SPDK_SCSI_SENSE_UNIT_ATTENTION &&
1182 		   asc == SPDK_SCSI_ASC_LOGICAL_UNIT_NOT_READY) {
1183 		return send_start_stop_unit(base);
1184 	} else {
1185 		return -1;
1186 	}
1187 }
1188 
1189 static int
1190 process_scan_inquiry_standard(struct virtio_scsi_scan_base *base)
1191 {
1192 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1193 	struct spdk_scsi_cdb_inquiry_data *inquiry_data =
1194 		(struct spdk_scsi_cdb_inquiry_data *)base->payload;
1195 
1196 	if (resp->status != SPDK_SCSI_STATUS_GOOD) {
1197 		return -1;
1198 	}
1199 
1200 	/* Check that it's a supported device type. */
1201 	if (inquiry_data->peripheral_device_type != SPDK_SPC_PERIPHERAL_DEVICE_TYPE_DISK ||
1202 	    inquiry_data->peripheral_qualifier != SPDK_SPC_PERIPHERAL_QUALIFIER_CONNECTED) {
1203 		SPDK_WARNLOG("Unsupported peripheral device type 0x%02x (qualifier 0x%02x)\n",
1204 			     inquiry_data->peripheral_device_type,
1205 			     inquiry_data->peripheral_qualifier);
1206 		return -1;
1207 	}
1208 
1209 	return send_test_unit_ready(base);
1210 }
1211 
1212 static int
1213 process_scan_inquiry_vpd_supported_vpd_pages(struct virtio_scsi_scan_base *base)
1214 {
1215 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1216 	bool block_provisioning_page_supported = false;
1217 
1218 	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
1219 		const uint8_t *vpd_data = base->payload;
1220 		const uint8_t *supported_vpd_pages = vpd_data + 4;
1221 		uint16_t page_length;
1222 		uint16_t num_supported_pages;
1223 		uint16_t i;
1224 
1225 		page_length = from_be16(vpd_data + 2);
1226 		num_supported_pages = spdk_min(page_length, base->iov.iov_len - 4);
1227 
1228 		for (i = 0; i < num_supported_pages; i++) {
1229 			if (supported_vpd_pages[i] == SPDK_SPC_VPD_BLOCK_THIN_PROVISION) {
1230 				block_provisioning_page_supported = true;
1231 				break;
1232 			}
1233 		}
1234 	}
1235 
1236 	if (block_provisioning_page_supported) {
1237 		return send_inquiry_vpd(base, SPDK_SPC_VPD_BLOCK_THIN_PROVISION);
1238 	} else {
1239 		return send_read_cap_10(base);
1240 	}
1241 }
1242 
1243 static int
1244 process_scan_inquiry_vpd_block_thin_provision(struct virtio_scsi_scan_base *base)
1245 {
1246 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1247 
1248 	base->info.unmap_supported = false;
1249 
1250 	if (resp->status == SPDK_SCSI_STATUS_GOOD) {
1251 		uint8_t *vpd_data = base->payload;
1252 
1253 		base->info.unmap_supported = !!(vpd_data[5] & SPDK_SCSI_UNMAP_LBPU);
1254 	}
1255 
1256 	SPDK_INFOLOG(virtio, "Target %u: unmap supported = %d\n",
1257 		     base->info.target, (int)base->info.unmap_supported);
1258 
1259 	return send_read_cap_10(base);
1260 }
1261 
1262 static int
1263 process_scan_inquiry(struct virtio_scsi_scan_base *base)
1264 {
1265 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1266 	struct spdk_scsi_cdb_inquiry *inquiry_cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;
1267 
1268 	if ((inquiry_cdb->evpd & 1) == 0) {
1269 		return process_scan_inquiry_standard(base);
1270 	}
1271 
1272 	switch (inquiry_cdb->page_code) {
1273 	case SPDK_SPC_VPD_SUPPORTED_VPD_PAGES:
1274 		return process_scan_inquiry_vpd_supported_vpd_pages(base);
1275 	case SPDK_SPC_VPD_BLOCK_THIN_PROVISION:
1276 		return process_scan_inquiry_vpd_block_thin_provision(base);
1277 	default:
1278 		SPDK_DEBUGLOG(virtio, "Unexpected VPD page 0x%02x\n", inquiry_cdb->page_code);
1279 		return -1;
1280 	}
1281 }
1282 
1283 static void
1284 bdev_virtio_disk_notify_remove(struct virtio_scsi_disk *disk)
1285 {
1286 	disk->removed = true;
1287 	spdk_bdev_close(disk->notify_desc);
1288 }
1289 
1290 static void
1291 bdev_virtio_disk_notify_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
1292 				 void *event_ctx)
1293 {
1294 	switch (type) {
1295 	case SPDK_BDEV_EVENT_REMOVE:
1296 		bdev_virtio_disk_notify_remove(event_ctx);
1297 		break;
1298 	default:
1299 		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
1300 		break;
1301 	}
1302 }
1303 
1304 /* To be called only from the thread performing target scan */
1305 static int
1306 virtio_scsi_dev_add_tgt(struct virtio_scsi_dev *svdev, struct virtio_scsi_scan_info *info)
1307 {
1308 	struct virtio_scsi_disk *disk;
1309 	struct spdk_bdev *bdev;
1310 	int rc;
1311 
1312 	TAILQ_FOREACH(disk, &svdev->luns, link) {
1313 		if (disk->info.target == info->target) {
1314 			/* Target is already attached and param change is not supported */
1315 			return 0;
1316 		}
1317 	}
1318 
1319 	if (info->block_size == 0 || info->num_blocks == 0) {
1320 		SPDK_ERRLOG("%s: invalid target %u: bs=%"PRIu32" blocks=%"PRIu64"\n",
1321 			    svdev->vdev.name, info->target, info->block_size, info->num_blocks);
1322 		return -EINVAL;
1323 	}
1324 
1325 	disk = calloc(1, sizeof(*disk));
1326 	if (disk == NULL) {
1327 		SPDK_ERRLOG("could not allocate disk\n");
1328 		return -ENOMEM;
1329 	}
1330 
1331 	disk->svdev = svdev;
1332 	memcpy(&disk->info, info, sizeof(*info));
1333 
1334 	bdev = &disk->bdev;
1335 	bdev->name = spdk_sprintf_alloc("%st%"PRIu8, svdev->vdev.name, info->target);
1336 	if (bdev->name == NULL) {
1337 		SPDK_ERRLOG("Couldn't alloc memory for the bdev name.\n");
1338 		free(disk);
1339 		return -ENOMEM;
1340 	}
1341 
1342 	bdev->product_name = "Virtio SCSI Disk";
1343 	bdev->write_cache = 0;
1344 	bdev->blocklen = disk->info.block_size;
1345 	bdev->blockcnt = disk->info.num_blocks;
1346 
1347 	bdev->ctxt = disk;
1348 	bdev->fn_table = &virtio_fn_table;
1349 	bdev->module = &virtio_scsi_if;
1350 
1351 	rc = spdk_bdev_register(&disk->bdev);
1352 	if (rc) {
1353 		SPDK_ERRLOG("Failed to register bdev name=%s\n", disk->bdev.name);
1354 		free(bdev->name);
1355 		free(disk);
1356 		return rc;
1357 	}
1358 
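	/* Open a descriptor on our own bdev purely to receive hot-remove
	 * notifications; this is not expected to fail for a bdev we just
	 * registered, hence the assert below.
	 */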
1359 	rc = spdk_bdev_open_ext(bdev->name, false, bdev_virtio_disk_notify_event_cb,
1360 				disk, &disk->notify_desc);
1361 	if (rc) {
1362 		assert(false);
1363 	}
1364 
1365 	TAILQ_INSERT_TAIL(&svdev->luns, disk, link);
1366 	return 0;
1367 }
1368 
1369 static int
1370 process_read_cap_10(struct virtio_scsi_scan_base *base)
1371 {
1372 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1373 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1374 	uint64_t max_block;
1375 	uint32_t block_size;
1376 	uint8_t target_id = req->lun[1];
1377 	int rc;
1378 
1379 	if (resp->response != VIRTIO_SCSI_S_OK || resp->status != SPDK_SCSI_STATUS_GOOD) {
1380 		SPDK_ERRLOG("READ CAPACITY (10) failed for target %"PRIu8".\n", target_id);
1381 		return -1;
1382 	}
1383 
1384 	block_size = from_be32(base->payload + 4);
1385 	max_block = from_be32(base->payload);
1386 
1387 	if (max_block == 0xffffffff) {
1388 		return send_read_cap_16(base);
1389 	}
1390 
1391 	base->info.num_blocks = (uint64_t)max_block + 1;
1392 	base->info.block_size = block_size;
1393 
1394 	rc = virtio_scsi_dev_add_tgt(base->svdev, &base->info);
1395 	if (rc != 0) {
1396 		return rc;
1397 	}
1398 
1399 	return _virtio_scsi_dev_scan_next(base, 0);
1400 }
1401 
1402 static int
1403 process_read_cap_16(struct virtio_scsi_scan_base *base)
1404 {
1405 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1406 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1407 	uint8_t target_id = req->lun[1];
1408 	int rc;
1409 
1410 	if (resp->response != VIRTIO_SCSI_S_OK || resp->status != SPDK_SCSI_STATUS_GOOD) {
1411 		SPDK_ERRLOG("READ CAPACITY (16) failed for target %"PRIu8".\n", target_id);
1412 		return -1;
1413 	}
1414 
1415 	base->info.num_blocks = from_be64(base->payload) + 1;
1416 	base->info.block_size = from_be32(base->payload + 8);
1417 	rc = virtio_scsi_dev_add_tgt(base->svdev, &base->info);
1418 	if (rc != 0) {
1419 		return rc;
1420 	}
1421 
1422 	return _virtio_scsi_dev_scan_next(base, 0);
1423 }
1424 
1425 static void
1426 process_scan_resp(struct virtio_scsi_scan_base *base)
1427 {
1428 	struct virtio_scsi_cmd_req *req = &base->io_ctx.req;
1429 	struct virtio_scsi_cmd_resp *resp = &base->io_ctx.resp;
1430 	int rc, sk, asc, ascq;
1431 	uint8_t target_id;
1432 
1433 	if (base->io_ctx.iov_req.iov_len < sizeof(struct virtio_scsi_cmd_req) ||
1434 	    base->io_ctx.iov_resp.iov_len < sizeof(struct virtio_scsi_cmd_resp)) {
1435 		SPDK_ERRLOG("Received target scan message with invalid length.\n");
1436 		_virtio_scsi_dev_scan_next(base, -EIO);
1437 		return;
1438 	}
1439 
1440 	get_scsi_status(resp, &sk, &asc, &ascq);
1441 	target_id = req->lun[1];
1442 
1443 	if (resp->response == VIRTIO_SCSI_S_BAD_TARGET ||
1444 	    resp->response == VIRTIO_SCSI_S_INCORRECT_LUN) {
1445 		_virtio_scsi_dev_scan_next(base, -ENODEV);
1446 		return;
1447 	}
1448 
1449 	if (resp->response != VIRTIO_SCSI_S_OK ||
1450 	    (resp->status == SPDK_SCSI_STATUS_CHECK_CONDITION &&
1451 	     sk != SPDK_SCSI_SENSE_ILLEGAL_REQUEST)) {
1452 		assert(base->retries > 0);
1453 		base->retries--;
1454 		if (base->retries == 0) {
1455 			SPDK_NOTICELOG("Target %"PRIu8" is present, but unavailable.\n", target_id);
1456 			SPDK_LOGDUMP(virtio, "CDB", req->cdb, sizeof(req->cdb));
1457 			SPDK_LOGDUMP(virtio, "SENSE DATA", resp->sense, sizeof(resp->sense));
1458 			_virtio_scsi_dev_scan_next(base, -EBUSY);
1459 			return;
1460 		}
1461 
1462 		/* resend the same request */
1463 		rc = send_scan_io(base);
1464 		if (rc != 0) {
1465 			/* Let response poller do the resend */
1466 		}
1467 		return;
1468 	}
1469 
1470 	base->retries = SCAN_REQUEST_RETRIES;
1471 
1472 	switch (req->cdb[0]) {
1473 	case SPDK_SPC_INQUIRY:
1474 		rc = process_scan_inquiry(base);
1475 		break;
1476 	case SPDK_SPC_TEST_UNIT_READY:
1477 		rc = process_scan_test_unit_ready(base);
1478 		break;
1479 	case SPDK_SBC_START_STOP_UNIT:
1480 		rc = process_scan_start_stop_unit(base);
1481 		break;
1482 	case SPDK_SBC_READ_CAPACITY_10:
1483 		rc = process_read_cap_10(base);
1484 		break;
1485 	case SPDK_SPC_SERVICE_ACTION_IN_16:
1486 		rc = process_read_cap_16(base);
1487 		break;
1488 	default:
1489 		SPDK_ERRLOG("Received invalid target scan message: cdb[0] = %"PRIu8".\n", req->cdb[0]);
1490 		rc = -1;
1491 		break;
1492 	}
1493 
1494 	if (rc != 0) {
1495 		if (base->needs_resend) {
1496 			return; /* Let response poller do the resend */
1497 		}
1498 
1499 		_virtio_scsi_dev_scan_next(base, rc);
1500 	}
1501 }
1502 
1503 static int
1504 _virtio_scsi_dev_scan_next(struct virtio_scsi_scan_base *base, int rc)
1505 {
1506 	struct virtio_scsi_scan_info *next;
1507 	struct virtio_scsi_disk *disk;
1508 	uint8_t target_id;
1509 
1510 	if (base->full_scan) {
1511 		if (rc != 0) {
1512 			disk = virtio_scsi_dev_get_disk_by_id(base->svdev,
1513 							      base->info.target);
1514 			if (disk != NULL) {
1515 				spdk_bdev_unregister(&disk->bdev, NULL, NULL);
1516 			}
1517 		}
1518 
1519 		target_id = base->info.target + 1;
1520 		if (target_id < BDEV_VIRTIO_MAX_TARGET) {
1521 			_virtio_scsi_dev_scan_tgt(base, target_id);
1522 			return 0;
1523 		}
1524 
1525 		base->full_scan = false;
1526 	}
1527 
1528 	next = TAILQ_FIRST(&base->scan_queue);
1529 	if (next == NULL) {
1530 		_virtio_scsi_dev_scan_finish(base, 0);
1531 		return 0;
1532 	}
1533 
1534 	TAILQ_REMOVE(&base->scan_queue, next, tailq);
1535 	target_id = next->target;
1536 	free(next);
1537 
1538 	_virtio_scsi_dev_scan_tgt(base, target_id);
1539 	return 0;
1540 }
1541 
1542 static int
1543 _virtio_scsi_dev_scan_init(struct virtio_scsi_dev *svdev)
1544 {
1545 	struct virtio_scsi_scan_base *base;
1546 	struct spdk_io_channel *io_ch;
1547 	struct virtio_scsi_io_ctx *io_ctx;
1548 	struct virtio_scsi_cmd_req *req;
1549 	struct virtio_scsi_cmd_resp *resp;
1550 
1551 	io_ch = spdk_get_io_channel(svdev);
1552 	if (io_ch == NULL) {
1553 		return -EBUSY;
1554 	}
1555 
1556 	base = spdk_zmalloc(sizeof(*base), 64, NULL,
1557 			    SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
1558 	if (base == NULL) {
1559 		SPDK_ERRLOG("couldn't allocate memory for SCSI target scan.\n");
		spdk_put_io_channel(io_ch);
1560 		return -ENOMEM;
1561 	}
1562 
1563 	base->svdev = svdev;
1564 
1565 	base->channel = spdk_io_channel_get_ctx(io_ch);
1566 	TAILQ_INIT(&base->scan_queue);
1567 	svdev->scan_ctx = base;
1568 
1569 	base->iov.iov_base = base->payload;
1570 	io_ctx = &base->io_ctx;
1571 	req = &io_ctx->req;
1572 	resp = &io_ctx->resp;
1573 	io_ctx->iov_req.iov_base = req;
1574 	io_ctx->iov_req.iov_len = sizeof(*req);
1575 	io_ctx->iov_resp.iov_base = resp;
1576 	io_ctx->iov_resp.iov_len = sizeof(*resp);
1577 
1578 	base->retries = SCAN_REQUEST_RETRIES;
1579 	return 0;
1580 }
1581 
1582 static void
1583 _virtio_scsi_dev_scan_tgt(struct virtio_scsi_scan_base *base, uint8_t target)
1584 {
1585 	int rc;
1586 
1587 	memset(&base->info, 0, sizeof(base->info));
1588 	base->info.target = target;
1589 
1590 	rc = send_inquiry(base);
1591 	if (rc) {
1592 		/* Let response poller do the resend */
1593 	}
1594 }
1595 
1596 static int
1597 virtio_scsi_dev_scan(struct virtio_scsi_dev *svdev, bdev_virtio_create_cb cb_fn,
1598 		     void *cb_arg)
1599 {
1600 	struct virtio_scsi_scan_base *base;
1601 	struct virtio_scsi_scan_info *tgt, *next_tgt;
1602 	int rc;
1603 
1604 	if (svdev->scan_ctx) {
1605 		if (svdev->scan_ctx->full_scan) {
1606 			return -EEXIST;
1607 		}
1608 
1609 		/* We're about to start a full rescan, so there's no need
1610 		 * to scan particular targets afterwards.
1611 		 */
1612 		TAILQ_FOREACH_SAFE(tgt, &svdev->scan_ctx->scan_queue, tailq, next_tgt) {
1613 			TAILQ_REMOVE(&svdev->scan_ctx->scan_queue, tgt, tailq);
1614 			free(tgt);
1615 		}
1616 
1617 		svdev->scan_ctx->cb_fn = cb_fn;
1618 		svdev->scan_ctx->cb_arg = cb_arg;
1619 		svdev->scan_ctx->restart = true;
1620 		return 0;
1621 	}
1622 
1623 	rc = _virtio_scsi_dev_scan_init(svdev);
1624 	if (rc != 0) {
1625 		return rc;
1626 	}
1627 
1628 	base = svdev->scan_ctx;
1629 	base->cb_fn = cb_fn;
1630 	base->cb_arg = cb_arg;
1631 	base->full_scan = true;
1632 
1633 	_virtio_scsi_dev_scan_tgt(base, 0);
1634 	return 0;
1635 }
1636 
1637 static int
1638 virtio_scsi_dev_scan_tgt(struct virtio_scsi_dev *svdev, uint8_t target)
1639 {
1640 	struct virtio_scsi_scan_base *base;
1641 	struct virtio_scsi_scan_info *info;
1642 	int rc;
1643 
1644 	base = svdev->scan_ctx;
1645 	if (base) {
1646 		info = calloc(1, sizeof(*info));
1647 		if (info == NULL) {
1648 			SPDK_ERRLOG("calloc failed\n");
1649 			return -ENOMEM;
1650 		}
1651 
1652 		info->target = target;
1653 		TAILQ_INSERT_TAIL(&base->scan_queue, info, tailq);
1654 		return 0;
1655 	}
1656 
1657 	rc = _virtio_scsi_dev_scan_init(svdev);
1658 	if (rc != 0) {
1659 		return rc;
1660 	}
1661 
1662 	base = svdev->scan_ctx;
1663 	base->full_scan = true;
1664 	_virtio_scsi_dev_scan_tgt(base, target);
1665 	return 0;
1666 }
1667 
1668 static int
1669 bdev_virtio_initialize(void)
1670 {
1671 	return 0;
1672 }
1673 
1674 static void
1675 _virtio_scsi_dev_unregister_cb(void *io_device)
1676 {
1677 	struct virtio_scsi_dev *svdev = io_device;
1678 	struct virtio_dev *vdev = &svdev->vdev;
1679 	bool finish_module;
1680 	bdev_virtio_remove_cb remove_cb;
1681 	void *remove_ctx;
1682 
1683 	assert(spdk_ring_count(svdev->ctrlq_ring) == 0);
1684 	spdk_ring_free(svdev->ctrlq_ring);
1685 	spdk_poller_unregister(&svdev->mgmt_poller);
1686 
1687 	virtio_dev_release_queue(vdev, VIRTIO_SCSI_EVENTQ);
1688 	virtio_dev_release_queue(vdev, VIRTIO_SCSI_CONTROLQ);
1689 
1690 	virtio_dev_stop(vdev);
1691 	virtio_dev_destruct(vdev);
1692 
1693 	pthread_mutex_lock(&g_virtio_scsi_mutex);
1694 	TAILQ_REMOVE(&g_virtio_scsi_devs, svdev, tailq);
1695 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
1696 
1697 	remove_cb = svdev->remove_cb;
1698 	remove_ctx = svdev->remove_ctx;
1699 	spdk_free(svdev->eventq_ios);
1700 	free(svdev);
1701 
1702 	if (remove_cb) {
1703 		remove_cb(remove_ctx, 0);
1704 	}
1705 
1706 	finish_module = TAILQ_EMPTY(&g_virtio_scsi_devs);
1707 
1708 	if (g_bdev_virtio_finish && finish_module) {
1709 		spdk_bdev_module_fini_done();
1710 	}
1711 }
1712 
1713 static void
1714 virtio_scsi_dev_unregister_cb(void *io_device)
1715 {
1716 	struct virtio_scsi_dev *svdev = io_device;
1717 	struct spdk_thread *thread;
1718 
1719 	thread = virtio_dev_queue_get_thread(&svdev->vdev, VIRTIO_SCSI_CONTROLQ);
1720 	spdk_thread_send_msg(thread, _virtio_scsi_dev_unregister_cb, io_device);
1721 }
1722 
1723 static void
1724 virtio_scsi_dev_remove(struct virtio_scsi_dev *svdev,
1725 		       bdev_virtio_remove_cb cb_fn, void *cb_arg)
1726 {
1727 	struct virtio_scsi_disk *disk, *disk_tmp;
1728 	bool do_remove = true;
1729 
1730 	if (svdev->removed) {
1731 		if (cb_fn) {
1732 			cb_fn(cb_arg, -EBUSY);
1733 		}
1734 		return;
1735 	}
1736 
1737 	svdev->remove_cb = cb_fn;
1738 	svdev->remove_ctx = cb_arg;
1739 	svdev->removed = true;
1740 
1741 	if (svdev->scan_ctx) {
1742 		/* The removal will continue after we receive a pending scan I/O. */
1743 		return;
1744 	}
1745 
1746 	TAILQ_FOREACH_SAFE(disk, &svdev->luns, link, disk_tmp) {
1747 		if (!disk->removed) {
1748 			spdk_bdev_unregister(&disk->bdev, NULL, NULL);
1749 		}
1750 		do_remove = false;
1751 	}
1752 
1753 	if (do_remove) {
1754 		spdk_io_device_unregister(svdev, virtio_scsi_dev_unregister_cb);
1755 	}
1756 }
1757 
1758 static void
1759 bdev_virtio_finish(void)
1760 {
1761 	struct virtio_scsi_dev *svdev, *next;
1762 
1763 	g_bdev_virtio_finish = true;
1764 
1765 	pthread_mutex_lock(&g_virtio_scsi_mutex);
1766 	if (TAILQ_EMPTY(&g_virtio_scsi_devs)) {
1767 		pthread_mutex_unlock(&g_virtio_scsi_mutex);
1768 		spdk_bdev_module_fini_done();
1769 		return;
1770 	}
1771 
1772 	/* Defer module finish until all controllers are removed. */
1773 	TAILQ_FOREACH_SAFE(svdev, &g_virtio_scsi_devs, tailq, next) {
1774 		virtio_scsi_dev_remove(svdev, NULL, NULL);
1775 	}
1776 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
1777 }
1778 
1779 int
1780 bdev_virtio_user_scsi_dev_create(const char *base_name, const char *path,
1781 				 unsigned num_queues, unsigned queue_size,
1782 				 bdev_virtio_create_cb cb_fn, void *cb_arg)
1783 {
1784 	struct virtio_scsi_dev *svdev;
1785 	int rc;
1786 
1787 	svdev = virtio_user_scsi_dev_create(base_name, path, num_queues, queue_size);
1788 	if (svdev == NULL) {
1789 		return -1;
1790 	}
1791 
1792 	rc = virtio_scsi_dev_scan(svdev, cb_fn, cb_arg);
1793 	if (rc) {
1794 		virtio_scsi_dev_remove(svdev, NULL, NULL);
1795 	}
1796 
1797 	return rc;
1798 }
1799 
1800 struct bdev_virtio_pci_dev_create_ctx {
1801 	const char *name;
1802 	bdev_virtio_create_cb cb_fn;
1803 	void *cb_arg;
1804 };
1805 
1806 static int
1807 bdev_virtio_pci_scsi_dev_create_cb(struct virtio_pci_ctx *pci_ctx, void *ctx)
1808 {
1809 	struct virtio_scsi_dev *svdev;
1810 	struct bdev_virtio_pci_dev_create_ctx *create_ctx = ctx;
1811 	int rc;
1812 
1813 	svdev = virtio_pci_scsi_dev_create(create_ctx->name, pci_ctx);
1814 	if (svdev == NULL) {
1815 		return -1;
1816 	}
1817 
1818 	rc = virtio_scsi_dev_scan(svdev, create_ctx->cb_fn, create_ctx->cb_arg);
1819 	if (rc) {
1820 		svdev->vdev.ctx = NULL;
1821 		virtio_scsi_dev_remove(svdev, NULL, NULL);
1822 	}
1823 
1824 	return rc;
1825 }
1826 
1827 int
1828 bdev_virtio_pci_scsi_dev_create(const char *name, struct spdk_pci_addr *pci_addr,
1829 				bdev_virtio_create_cb cb_fn, void *cb_arg)
1830 {
1831 	struct bdev_virtio_pci_dev_create_ctx create_ctx;
1832 
1833 	create_ctx.name = name;
1834 	create_ctx.cb_fn = cb_fn;
1835 	create_ctx.cb_arg = cb_arg;
1836 
1837 	return virtio_pci_dev_attach(bdev_virtio_pci_scsi_dev_create_cb, &create_ctx,
1838 				     VIRTIO_ID_SCSI, pci_addr);
1839 }
1840 
1841 int
1842 bdev_virtio_scsi_dev_remove(const char *name, bdev_virtio_remove_cb cb_fn, void *cb_arg)
1843 {
1844 	struct virtio_scsi_dev *svdev;
1845 
1846 	pthread_mutex_lock(&g_virtio_scsi_mutex);
1847 	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
1848 		if (strcmp(svdev->vdev.name, name) == 0) {
1849 			break;
1850 		}
1851 	}
1852 
1853 	if (svdev == NULL) {
1854 		pthread_mutex_unlock(&g_virtio_scsi_mutex);
1855 		SPDK_ERRLOG("Cannot find Virtio-SCSI device named '%s'\n", name);
1856 		return -ENODEV;
1857 	}
1858 
1859 	virtio_scsi_dev_remove(svdev, cb_fn, cb_arg);
1860 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
1861 
1862 	return 0;
1863 }
1864 
1865 void
1866 bdev_virtio_scsi_dev_list(struct spdk_json_write_ctx *w)
1867 {
1868 	struct virtio_scsi_dev *svdev;
1869 
1870 	spdk_json_write_array_begin(w);
1871 
1872 	pthread_mutex_lock(&g_virtio_scsi_mutex);
1873 	TAILQ_FOREACH(svdev, &g_virtio_scsi_devs, tailq) {
1874 		spdk_json_write_object_begin(w);
1875 
1876 		spdk_json_write_named_string(w, "name", svdev->vdev.name);
1877 
1878 		virtio_dev_dump_json_info(&svdev->vdev, w);
1879 
1880 		spdk_json_write_object_end(w);
1881 	}
1882 	pthread_mutex_unlock(&g_virtio_scsi_mutex);
1883 
1884 	spdk_json_write_array_end(w);
1885 }
1886 
1887 SPDK_LOG_REGISTER_COMPONENT(virtio)
1888