/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include <linux/virtio_scsi.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_config.h>

#include <rte_config.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_common.h>
#include <rte_errno.h>

#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_prefetch.h>

#include "spdk/env.h"
#include "spdk/barrier.h"

#include "spdk_internal/virtio.h"

/* We use SMP memory barrier variants as all virtio_pci devices
 * are purely virtual. All MMIO is executed on a CPU core, so
 * there's no need to do full MMIO synchronization.
 */
#define virtio_mb()	spdk_smp_mb()
#define virtio_rmb()	spdk_smp_rmb()
#define virtio_wmb()	spdk_smp_wmb()
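
/* A note on the macros above: in SPDK's barrier.h the SMP read/write
 * variants typically reduce to plain compiler barriers on x86 (only
 * spdk_smp_mb() emits a real fence), which is sufficient to order our
 * ring updates against a vhost target polling from another core. This
 * describes the implementation at the time of writing, not a contract.
 */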

struct virtio_driver g_virtio_driver = {
	.scsi_devs = TAILQ_HEAD_INITIALIZER(g_virtio_driver.scsi_devs),
};

/* Chain all the descriptors in the ring with an END */
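/* For example, with n == 4 the resulting free list reads:
 * desc[0].next == 1, desc[1].next == 2, desc[2].next == 3,
 * desc[3].next == VQ_RING_DESC_CHAIN_END.
 */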
static inline void
vring_desc_init(struct vring_desc *dp, uint16_t n)
{
	uint16_t i;

	for (i = 0; i < n - 1; i++) {
		dp[i].next = (uint16_t)(i + 1);
	}
	dp[i].next = VQ_RING_DESC_CHAIN_END;
}

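/* Lay out and reset the split vring. vring_init() (from the Linux virtio
 * headers) places the descriptor table first, the avail ring immediately
 * after it, and the used ring on the next VIRTIO_PCI_VRING_ALIGN boundary,
 * all within the single vq_ring_virt_mem allocation.
 */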
static void
virtio_init_vring(struct virtqueue *vq)
{
	int size = vq->vq_nentries;
	struct vring *vr = &vq->vq_ring;
	uint8_t *ring_mem = vq->vq_ring_virt_mem;

	/*
	 * Reinitialize since the virtio device might have been stopped
	 * and restarted.
	 */
	memset(ring_mem, 0, vq->vq_ring_size);
	vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;
	vq->req_start = VQ_RING_DESC_CHAIN_END;
	vq->req_end = VQ_RING_DESC_CHAIN_END;
	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);

	vring_desc_init(vr->desc, size);

	/* Tell the backend not to interrupt us. */
	vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

static int
virtio_init_queue(struct virtio_dev *dev, uint16_t vtpci_queue_idx)
{
	void *queue_mem;
	unsigned int vq_size, size;
	uint64_t queue_mem_phys_addr;
	struct virtqueue *vq;
	int ret;

	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_DEV, "setting up queue: %"PRIu16"\n", vtpci_queue_idx);

	/*
	 * Read the virtqueue size from the Queue Size field. It is always
	 * a power of 2; a size of 0 means the virtqueue does not exist.
	 */
	vq_size = virtio_dev_backend_ops(dev)->get_queue_num(dev, vtpci_queue_idx);
	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_DEV, "vq_size: %u\n", vq_size);
	if (vq_size == 0) {
		SPDK_ERRLOG("virtqueue %"PRIu16" does not exist\n", vtpci_queue_idx);
		return -EINVAL;
	}

	if (!rte_is_power_of_2(vq_size)) {
		SPDK_ERRLOG("virtqueue %"PRIu16" size (%u) is not a power of 2\n",
			    vtpci_queue_idx, vq_size);
		return -EINVAL;
	}

	size = RTE_ALIGN_CEIL(sizeof(*vq) +
			      vq_size * sizeof(struct vq_desc_extra),
			      RTE_CACHE_LINE_SIZE);

	vq = spdk_dma_zmalloc(size, RTE_CACHE_LINE_SIZE, NULL);
	if (vq == NULL) {
		SPDK_ERRLOG("cannot allocate vq\n");
		return -ENOMEM;
	}
	dev->vqs[vtpci_queue_idx] = vq;

	vq->vdev = dev;
	vq->vq_queue_index = vtpci_queue_idx;
	vq->vq_nentries = vq_size;

	/*
	 * Reserve a memzone for the vring elements.
	 */
	size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_DEV, "vring_size: %u, rounded_vring_size: %u\n",
		      size, vq->vq_ring_size);

	queue_mem = spdk_dma_zmalloc(vq->vq_ring_size, VIRTIO_PCI_VRING_ALIGN, &queue_mem_phys_addr);
	if (queue_mem == NULL) {
		ret = -ENOMEM;
		goto fail_q_alloc;
	}

	vq->vq_ring_mem = queue_mem_phys_addr;
	vq->vq_ring_virt_mem = queue_mem;
	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_DEV, "vq->vq_ring_mem:      0x%" PRIx64 "\n",
		      vq->vq_ring_mem);
	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_DEV, "vq->vq_ring_virt_mem: 0x%" PRIx64 "\n",
		      (uint64_t)(uintptr_t)vq->vq_ring_virt_mem);

	virtio_init_vring(vq);

	vq->owner_thread = NULL;

	if (virtio_dev_backend_ops(dev)->setup_queue(dev, vq) < 0) {
		SPDK_ERRLOG("setup_queue failed\n");
		/* Unlike the failure paths above, this one must also release
		 * the ring memory already attached to the vq.
		 */
		spdk_dma_free(queue_mem);
		ret = -EINVAL;
		goto fail_q_alloc;
	}

	return 0;

fail_q_alloc:
	/* Clear the entry so virtio_free_queues() doesn't see a stale pointer. */
	dev->vqs[vtpci_queue_idx] = NULL;
	rte_free(vq);

	return ret;
}

static void
virtio_free_queues(struct virtio_dev *dev)
{
	uint16_t nr_vq = dev->max_queues;
	struct virtqueue *vq;
	uint16_t i;

	if (dev->vqs == NULL) {
		return;
	}

	for (i = 0; i < nr_vq; i++) {
		vq = dev->vqs[i];
		if (!vq) {
			continue;
		}

		spdk_dma_free(vq->vq_ring_virt_mem);

		rte_free(vq);
		dev->vqs[i] = NULL;
	}

	rte_free(dev->vqs);
	dev->vqs = NULL;
}

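/* Allocate and initialize request_vq_num + fixed_vq_num virtqueues. By
 * convention the fixed queues occupy the lowest indexes; for virtio-scsi,
 * for example, the controlq is queue 0 and the eventq is queue 1, with
 * the request queues following.
 */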
static int
virtio_alloc_queues(struct virtio_dev *dev, uint16_t request_vq_num, uint16_t fixed_vq_num)
{
	uint16_t nr_vq;
	uint16_t i;
	int ret;

	nr_vq = request_vq_num + fixed_vq_num;
	if (nr_vq == 0) {
		/* perfectly fine to have a device with no virtqueues. */
		return 0;
	}

	assert(dev->vqs == NULL);
	dev->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
	if (!dev->vqs) {
		SPDK_ERRLOG("failed to allocate %"PRIu16" vqs\n", nr_vq);
		return -ENOMEM;
	}

	/* Set the queue counts up front, so that virtio_free_queues() can
	 * clean up a partially initialized set on failure below.
	 */
	dev->max_queues = nr_vq;
	dev->fixed_queues_num = fixed_vq_num;

	for (i = 0; i < nr_vq; i++) {
		ret = virtio_init_queue(dev, i);
		if (ret < 0) {
			virtio_free_queues(dev);
			return ret;
		}
	}

	return 0;
}

/**
 * Negotiate virtio features. For virtio_user this will also set
 * dev->modern flag if VIRTIO_F_VERSION_1 flag is negotiated.
 */
static int
virtio_negotiate_features(struct virtio_dev *dev, uint64_t req_features)
{
	uint64_t host_features = virtio_dev_backend_ops(dev)->get_features(dev);
	int rc;

	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_DEV, "guest features = %" PRIx64 "\n", req_features);
	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_DEV, "device features = %" PRIx64 "\n", host_features);

	/* The negotiated feature set is the intersection of what we request
	 * and what the device offers.
	 */
	rc = virtio_dev_backend_ops(dev)->set_features(dev, req_features & host_features);
	if (rc != 0) {
		SPDK_ERRLOG("failed to negotiate device features.\n");
		return -1;
	}

	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_DEV, "negotiated features = %" PRIx64 "\n",
		      dev->negotiated_features);

	virtio_dev_set_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
	if (!(virtio_dev_get_status(dev) & VIRTIO_CONFIG_S_FEATURES_OK)) {
		SPDK_ERRLOG("failed to set FEATURES_OK status!\n");
		return -1;
	}

	return 0;
}

int
virtio_dev_construct(struct virtio_dev *vdev, const char *name,
		     const struct virtio_dev_ops *ops, void *ctx)
{
	int rc;

	vdev->name = strdup(name);
	if (vdev->name == NULL) {
		return -ENOMEM;
	}

	rc = pthread_mutex_init(&vdev->mutex, NULL);
	if (rc != 0) {
		free(vdev->name);
		return -rc;
	}

	vdev->backend_ops = ops;
	vdev->ctx = ctx;

	return 0;
}

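/*
 * First steps of the virtio 1.0 initialization handshake: reset, then
 * ACKNOWLEDGE, then DRIVER, then feature negotiation (which ends with
 * FEATURES_OK). DRIVER_OK is set separately by virtio_dev_start() once
 * the virtqueues have been allocated.
 */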
int
virtio_dev_reset(struct virtio_dev *dev, uint64_t req_features)
{
	req_features |= (1ULL << VIRTIO_F_VERSION_1);

	/* Reset the device, although that's not strictly necessary at startup. */
	virtio_dev_stop(dev);

	/* Tell the host we've noticed this device. */
	virtio_dev_set_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);

	/* Tell the host we know how to drive the device. */
	virtio_dev_set_status(dev, VIRTIO_CONFIG_S_DRIVER);

	return virtio_negotiate_features(dev, req_features);
}

int
virtio_dev_start(struct virtio_dev *vdev, uint16_t max_queues, uint16_t fixed_queue_num)
{
	int ret;

	ret = virtio_alloc_queues(vdev, max_queues, fixed_queue_num);
	if (ret < 0) {
		return ret;
	}

	virtio_dev_set_status(vdev, VIRTIO_CONFIG_S_DRIVER_OK);
	return 0;
}

void
virtio_dev_destruct(struct virtio_dev *dev)
{
	virtio_dev_backend_ops(dev)->destruct_dev(dev);
	pthread_mutex_destroy(&dev->mutex);
	free(dev->name);
}

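/* Return the descriptor chain starting at desc_idx to the free list: walk
 * the chain to find its last descriptor, credit the whole chain back to
 * vq_free_cnt, and splice it onto the tail of the existing free chain (or
 * make it the new head if the ring was completely used).
 */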
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp, *dp_tail;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx_last = desc_idx;

	dp  = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			desc_idx_last = dp->next;
			dp = &vq->vq_ring.desc[dp->next];
		}
	}
	dxp->ndescs = 0;

	/*
	 * Append the just-freed chain to the end of the existing free
	 * chain, if any. If the virtqueue was completely used, the free
	 * list is empty and vq_desc_tail_idx is VQ_RING_DESC_CHAIN_END.
	 */
	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
		vq->vq_desc_head_idx = desc_idx;
	} else {
		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
		dp_tail->next = desc_idx;
	}

	vq->vq_desc_tail_idx = desc_idx_last;
	dp->next = VQ_RING_DESC_CHAIN_END;
}

static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, void **rx_pkts,
			   uint32_t *len, uint16_t num)
{
	struct vring_used_elem *uep;
	struct virtio_req *cookie;
	uint16_t used_idx, desc_idx;
	uint16_t i;

	/* The caller checks num against the number of used descriptors. */
	for (i = 0; i < num; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		desc_idx = (uint16_t) uep->id;
		len[i] = uep->len;
		cookie = (struct virtio_req *)vq->vq_descx[desc_idx].cookie;

		if (spdk_unlikely(cookie == NULL)) {
			SPDK_WARNLOG("vring descriptor with no associated cookie at %"PRIu16"\n",
				     vq->vq_used_cons_idx);
			break;
		}

		rte_prefetch0(cookie);
		rx_pkts[i] = cookie;
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].cookie = NULL;
	}

	return i;
}

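/*
 * A minimal usage sketch for the request API below (my_req, cmd_iov and
 * resp_iov are hypothetical; error handling omitted). SPDK_VIRTIO_DESC_RO
 * and SPDK_VIRTIO_DESC_WR are the descriptor types from
 * spdk_internal/virtio.h:
 *
 *	if (virtqueue_req_start(vq, my_req, 2) == 0) {
 *		virtqueue_req_add_iovs(vq, &cmd_iov, 1, SPDK_VIRTIO_DESC_RO);
 *		virtqueue_req_add_iovs(vq, &resp_iov, 1, SPDK_VIRTIO_DESC_WR);
 *		virtqueue_req_flush(vq);
 *	}
 */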
int
virtqueue_req_start(struct virtqueue *vq, void *cookie, int iovcnt)
{
	struct vring_desc *desc;
	struct vq_desc_extra *dxp;

	if (iovcnt > vq->vq_free_cnt) {
		return iovcnt > vq->vq_nentries ? -EINVAL : -ENOMEM;
	}

	if (vq->req_start != VQ_RING_DESC_CHAIN_END) {
		desc = &vq->vq_ring.desc[vq->req_end];
		desc->flags &= ~VRING_DESC_F_NEXT;
	}

	vq->req_start = vq->vq_desc_head_idx;
	dxp = &vq->vq_descx[vq->req_start];
	dxp->cookie = cookie;
	dxp->ndescs = 0;

	return 0;
}

void
virtqueue_req_flush(struct virtqueue *vq)
{
	struct vring_desc *desc;
	uint16_t avail_idx;

	if (vq->req_start == VQ_RING_DESC_CHAIN_END) {
		/* no requests have been started */
		return;
	}

	desc = &vq->vq_ring.desc[vq->req_end];
	desc->flags &= ~VRING_DESC_F_NEXT;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
	if (spdk_unlikely(vq->vq_ring.avail->ring[avail_idx] != vq->req_start)) {
		vq->vq_ring.avail->ring[avail_idx] = vq->req_start;
	}

	vq->vq_avail_idx++;
	vq->req_start = VQ_RING_DESC_CHAIN_END;

	/* Ensure the descriptor and avail ring writes are visible before
	 * publishing the new avail index to the device.
	 */
	virtio_wmb();
	vq->vq_ring.avail->idx = vq->vq_avail_idx;

	/* Order the avail index update against the read of used->flags below,
	 * so a device that just cleared VRING_USED_F_NO_NOTIFY isn't missed.
	 */
	virtio_mb();
	if (spdk_unlikely(!(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY))) {
		virtio_dev_backend_ops(vq->vdev)->notify_queue(vq->vdev, vq);
		SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_DEV, "Notified backend after xmit\n");
	}
}

void
virtqueue_req_abort(struct virtqueue *vq)
{
	struct vring_desc *desc;

	if (vq->req_start == VQ_RING_DESC_CHAIN_END) {
		/* no requests have been started */
		return;
	}

	desc = &vq->vq_ring.desc[vq->req_end];
	desc->flags &= ~VRING_DESC_F_NEXT;

	vq_ring_free_chain(vq, vq->req_start);
	vq->req_start = VQ_RING_DESC_CHAIN_END;
}

void
virtqueue_req_add_iovs(struct virtqueue *vq, struct iovec *iovs, uint16_t iovcnt,
		       enum spdk_virtio_desc_type desc_type)
{
	struct vring_desc *desc;
	struct vq_desc_extra *dxp;
	uint16_t i, prev_head, new_head;

	assert(vq->req_start != VQ_RING_DESC_CHAIN_END);
	assert(iovcnt <= vq->vq_free_cnt);

	/* TODO use indirect descriptors if iovcnt is high enough
	 * or the caller specifies SPDK_VIRTIO_DESC_F_INDIRECT
	 */

	prev_head = vq->req_end;
	new_head = vq->vq_desc_head_idx;
	for (i = 0; i < iovcnt; ++i) {
		desc = &vq->vq_ring.desc[new_head];

		if (!vq->vdev->is_hw) {
			/* Virtual backends (e.g. vhost-user) share our address
			 * space, so pass the virtual address directly.
			 */
			desc->addr = (uintptr_t)iovs[i].iov_base;
		} else {
			/* Real hardware needs a physical address. */
			desc->addr = spdk_vtophys(iovs[i].iov_base);
		}

		desc->len = iovs[i].iov_len;
		/* Always set the NEXT flag here; the request-terminating
		 * functions clear it on the chain's last descriptor.
		 */
		desc->flags = desc_type | VRING_DESC_F_NEXT;

		prev_head = new_head;
		new_head = desc->next;
	}

	dxp = &vq->vq_descx[vq->req_start];
	dxp->ndescs += iovcnt;

	vq->req_end = prev_head;
	vq->vq_desc_head_idx = new_head;
	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
		assert(vq->vq_free_cnt == 0);
		vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
	}
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - iovcnt);
}

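/* Dequeue used descriptors in bursts. With a 64-byte cache line and
 * 16-byte vring_desc entries, DESC_PER_CACHELINE is typically 4; when
 * more than that many used elements are pending, the burst is trimmed so
 * the final consumer index lands on a cache-line boundary, reducing
 * cache-line bouncing between successive polls (a heuristic inherited
 * from DPDK's virtio driver). A polling sketch (hypothetical buffers):
 *
 *	void *reqs[32];
 *	uint32_t lens[32];
 *	uint16_t done = virtio_recv_pkts(vq, reqs, lens, 32);
 */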
#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
uint16_t
virtio_recv_pkts(struct virtqueue *vq, void **io, uint32_t *len, uint16_t nb_pkts)
{
	uint16_t nb_used, num;

	nb_used = vq->vq_ring.used->idx - vq->vq_used_cons_idx;
	/* Read used->idx before any used ring entries. */
	virtio_rmb();

	num = (uint16_t)(spdk_likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
	if (spdk_likely(num > DESC_PER_CACHELINE)) {
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
	}

	num = virtqueue_dequeue_burst_rx(vq, io, len, num);
	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_DEV, "used:%"PRIu16" dequeue:%"PRIu16"\n", nb_used, num);

	return num;
}

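/* Virtqueues are not thread safe. The helpers below implement a simple
 * ownership model: an SPDK thread must acquire a queue before using it
 * and release it afterwards. vdev->mutex protects only this ownership
 * bookkeeping, never the ring itself.
 */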
int
virtio_dev_acquire_queue(struct virtio_dev *vdev, uint16_t index)
{
	struct virtqueue *vq = NULL;

	if (index >= vdev->max_queues) {
		SPDK_ERRLOG("requested vq index %"PRIu16" exceeds max queue count %"PRIu16".\n",
			    index, vdev->max_queues);
		return -1;
	}

	pthread_mutex_lock(&vdev->mutex);
	vq = vdev->vqs[index];
	if (vq == NULL || vq->owner_thread != NULL) {
		pthread_mutex_unlock(&vdev->mutex);
		return -1;
	}

	vq->owner_thread = spdk_get_thread();
	pthread_mutex_unlock(&vdev->mutex);
	return 0;
}

int32_t
virtio_dev_find_and_acquire_queue(struct virtio_dev *vdev, uint16_t start_index)
{
	struct virtqueue *vq = NULL;
	uint16_t i;

	pthread_mutex_lock(&vdev->mutex);
	for (i = start_index; i < vdev->max_queues; ++i) {
		vq = vdev->vqs[i];
		if (vq != NULL && vq->owner_thread == NULL) {
			break;
		}
	}

	if (vq == NULL || i == vdev->max_queues) {
		SPDK_ERRLOG("no more unused virtio queues with idx >= %"PRIu16".\n", start_index);
		pthread_mutex_unlock(&vdev->mutex);
		return -1;
	}

	vq->owner_thread = spdk_get_thread();
	pthread_mutex_unlock(&vdev->mutex);
	return i;
}

struct spdk_thread *
virtio_dev_queue_get_thread(struct virtio_dev *vdev, uint16_t index)
{
	struct virtqueue *vq;
	struct spdk_thread *thread = NULL;

	if (index >= vdev->max_queues) {
		SPDK_ERRLOG("given vq index %"PRIu16" exceeds max queue count %"PRIu16"\n",
			    index, vdev->max_queues);
		return NULL;
	}

	pthread_mutex_lock(&vdev->mutex);
	vq = vdev->vqs[index];
	if (vq != NULL) {
		thread = vq->owner_thread;
	}
	pthread_mutex_unlock(&vdev->mutex);

	return thread;
}

bool
virtio_dev_queue_is_acquired(struct virtio_dev *vdev, uint16_t index)
{
	return virtio_dev_queue_get_thread(vdev, index) != NULL;
}

void
virtio_dev_release_queue(struct virtio_dev *vdev, uint16_t index)
{
	struct virtqueue *vq = NULL;

	if (index >= vdev->max_queues) {
		SPDK_ERRLOG("given vq index %"PRIu16" exceeds max queue count %"PRIu16".\n",
			    index, vdev->max_queues);
		return;
	}

	pthread_mutex_lock(&vdev->mutex);
	vq = vdev->vqs[index];
	if (vq == NULL) {
		SPDK_ERRLOG("virtqueue at index %"PRIu16" is not initialized.\n", index);
		pthread_mutex_unlock(&vdev->mutex);
		return;
	}

	assert(vq->owner_thread == spdk_get_thread());
	vq->owner_thread = NULL;
	pthread_mutex_unlock(&vdev->mutex);
}

void
virtio_dev_read_dev_config(struct virtio_dev *dev, size_t offset,
			   void *dst, int length)
{
	virtio_dev_backend_ops(dev)->read_dev_cfg(dev, offset, dst, length);
}

void
virtio_dev_write_dev_config(struct virtio_dev *dev, size_t offset,
			    const void *src, int length)
{
	virtio_dev_backend_ops(dev)->write_dev_cfg(dev, offset, src, length);
}

void
virtio_dev_stop(struct virtio_dev *dev)
{
	virtio_dev_backend_ops(dev)->set_status(dev, VIRTIO_CONFIG_S_RESET);
	/* flush status write */
	virtio_dev_backend_ops(dev)->get_status(dev);
	virtio_free_queues(dev);
}

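/* VIRTIO_CONFIG_S_RESET (0) clears the status register and resets the
 * device. Any other value is OR-ed into the bits already set, since the
 * virtio spec does not allow drivers to clear status bits they have
 * previously written.
 */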
void
virtio_dev_set_status(struct virtio_dev *dev, uint8_t status)
{
	if (status != VIRTIO_CONFIG_S_RESET) {
		status |= virtio_dev_backend_ops(dev)->get_status(dev);
	}

	virtio_dev_backend_ops(dev)->set_status(dev, status);
}

uint8_t
virtio_dev_get_status(struct virtio_dev *dev)
{
	return virtio_dev_backend_ops(dev)->get_status(dev);
}

const struct virtio_dev_ops *
virtio_dev_backend_ops(struct virtio_dev *dev)
{
	return dev->backend_ops;
}

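/* Example of the generated JSON, with illustrative values; any trailing
 * fields come from the backend's dump_json_config callback:
 *
 *	"virtio": {
 *		"vq_count": 3,
 *		"vq_size": 128,
 *		...
 *	}
 */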
void
virtio_dev_dump_json_config(struct virtio_dev *hw, struct spdk_json_write_ctx *w)
{
	spdk_json_write_name(w, "virtio");
	spdk_json_write_object_begin(w);

	spdk_json_write_name(w, "vq_count");
	spdk_json_write_uint32(w, hw->max_queues);

	spdk_json_write_name(w, "vq_size");
	spdk_json_write_uint32(w, virtio_dev_backend_ops(hw)->get_queue_num(hw, 0));

	virtio_dev_backend_ops(hw)->dump_json_config(hw, w);

	spdk_json_write_object_end(w);
}

SPDK_LOG_REGISTER_COMPONENT("virtio_dev", SPDK_LOG_VIRTIO_DEV)