/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2010-2016 Intel Corporation. All rights reserved.
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/env.h"
#include "spdk/util.h"
#include "spdk/barrier.h"

#include "spdk_internal/virtio.h"

/* We use SMP memory barrier variants as all virtio_pci devices
 * are purely virtual. All MMIO is executed on a CPU core, so
 * there's no need to do full MMIO synchronization.
 */
#define virtio_mb()	spdk_smp_mb()
#define virtio_rmb()	spdk_smp_rmb()
#define virtio_wmb()	spdk_smp_wmb()

/* Chain all the descriptors in the ring with an END */
static inline void
vring_desc_init(struct vring_desc *dp, uint16_t n)
{
	uint16_t i;

	for (i = 0; i < n - 1; i++) {
		dp[i].next = (uint16_t)(i + 1);
	}
	dp[i].next = VQ_RING_DESC_CHAIN_END;
}

static void
virtio_init_vring(struct virtqueue *vq)
{
	int size = vq->vq_nentries;
	struct vring *vr = &vq->vq_ring;
	uint8_t *ring_mem = vq->vq_ring_virt_mem;

	/*
	 * Reinitialize, since the virtio port might have been stopped and restarted.
	 */
	memset(ring_mem, 0, vq->vq_ring_size);
	vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;
	vq->req_start = VQ_RING_DESC_CHAIN_END;
	vq->req_end = VQ_RING_DESC_CHAIN_END;
	vq->reqs_finished = 0;
	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);

	vring_desc_init(vr->desc, size);

	/* Tell the backend not to interrupt us.
	 * If F_EVENT_IDX is negotiated, we will always set an extremely high
	 * used event idx, so that we will practically never receive an
	 * interrupt. See virtqueue_req_flush().
	 */
	if (vq->vdev->negotiated_features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		vring_used_event(&vq->vq_ring) = UINT16_MAX;
	} else {
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	}
}

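/*
 * Allocate and initialize the driver-side state for a single virtqueue:
 * query the queue size from the device, allocate the virtqueue structure
 * together with its per-descriptor bookkeeping, and let the backend set up
 * the vring memory.
 */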
static int
virtio_init_queue(struct virtio_dev *dev, uint16_t vtpci_queue_idx)
{
	unsigned int vq_size, size;
	struct virtqueue *vq;
	int rc;

	SPDK_DEBUGLOG(virtio_dev, "setting up queue: %"PRIu16"\n", vtpci_queue_idx);

	/*
	 * Read the virtqueue size from the Queue Size field.
	 * It is always a power of 2; a value of 0 means the virtqueue does not exist.
	 */
	vq_size = virtio_dev_backend_ops(dev)->get_queue_size(dev, vtpci_queue_idx);
	SPDK_DEBUGLOG(virtio_dev, "vq_size: %u\n", vq_size);
	if (vq_size == 0) {
		SPDK_ERRLOG("virtqueue %"PRIu16" does not exist\n", vtpci_queue_idx);
		return -EINVAL;
	}

	if (!spdk_u32_is_pow2(vq_size)) {
		SPDK_ERRLOG("virtqueue %"PRIu16" size (%u) is not a power of 2\n",
			    vtpci_queue_idx, vq_size);
		return -EINVAL;
	}

	size = sizeof(*vq) + vq_size * sizeof(struct vq_desc_extra);

	if (posix_memalign((void **)&vq, SPDK_CACHE_LINE_SIZE, size)) {
		SPDK_ERRLOG("can not allocate vq\n");
		return -ENOMEM;
	}
	memset(vq, 0, size);
	dev->vqs[vtpci_queue_idx] = vq;

	vq->vdev = dev;
	vq->vq_queue_index = vtpci_queue_idx;
	vq->vq_nentries = vq_size;

	/*
	 * Reserve a memzone for vring elements
	 */
	size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_ring_size = SPDK_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
	SPDK_DEBUGLOG(virtio_dev, "vring_size: %u, rounded_vring_size: %u\n",
		      size, vq->vq_ring_size);

	vq->owner_thread = NULL;

	rc = virtio_dev_backend_ops(dev)->setup_queue(dev, vq);
	if (rc < 0) {
		SPDK_ERRLOG("setup_queue failed\n");
		free(vq);
		dev->vqs[vtpci_queue_idx] = NULL;
		return rc;
	}

	SPDK_DEBUGLOG(virtio_dev, "vq->vq_ring_mem: 0x%" PRIx64 "\n",
		      vq->vq_ring_mem);
	SPDK_DEBUGLOG(virtio_dev, "vq->vq_ring_virt_mem: 0x%" PRIx64 "\n",
		      (uint64_t)(uintptr_t)vq->vq_ring_virt_mem);

	virtio_init_vring(vq);
	return 0;
}

static void
virtio_free_queues(struct virtio_dev *dev)
{
	uint16_t nr_vq = dev->max_queues;
	struct virtqueue *vq;
	uint16_t i;

	if (dev->vqs == NULL) {
		return;
	}

	for (i = 0; i < nr_vq; i++) {
		vq = dev->vqs[i];
		if (!vq) {
			continue;
		}

		virtio_dev_backend_ops(dev)->del_queue(dev, vq);

		free(vq);
		dev->vqs[i] = NULL;
	}

	free(dev->vqs);
	dev->vqs = NULL;
}

static int
virtio_alloc_queues(struct virtio_dev *dev, uint16_t max_queues, uint16_t fixed_vq_num)
{
	uint16_t i;
	int ret;

	if (max_queues == 0) {
		/* perfectly fine to have a device with no virtqueues. */
		return 0;
	}

	assert(dev->vqs == NULL);
	dev->vqs = calloc(1, sizeof(struct virtqueue *) * max_queues);
	if (!dev->vqs) {
		SPDK_ERRLOG("failed to allocate %"PRIu16" vqs\n", max_queues);
		return -ENOMEM;
	}

	for (i = 0; i < max_queues; i++) {
		ret = virtio_init_queue(dev, i);
		if (ret < 0) {
			virtio_free_queues(dev);
			return ret;
		}
	}

	dev->max_queues = max_queues;
	dev->fixed_queues_num = fixed_vq_num;
	return 0;
}

/**
 * Negotiate virtio features. For virtio_user this will also set
 * dev->modern flag if VIRTIO_F_VERSION_1 flag is negotiated.
 */
static int
virtio_negotiate_features(struct virtio_dev *dev, uint64_t req_features)
{
	uint64_t host_features = virtio_dev_backend_ops(dev)->get_features(dev);
	int rc;

	SPDK_DEBUGLOG(virtio_dev, "guest features = %" PRIx64 "\n", req_features);
	SPDK_DEBUGLOG(virtio_dev, "device features = %" PRIx64 "\n", host_features);

	rc = virtio_dev_backend_ops(dev)->set_features(dev, req_features & host_features);
	if (rc != 0) {
		SPDK_ERRLOG("failed to negotiate device features.\n");
		return rc;
	}

	SPDK_DEBUGLOG(virtio_dev, "negotiated features = %" PRIx64 "\n",
		      dev->negotiated_features);

	virtio_dev_set_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
	if (!(virtio_dev_get_status(dev) & VIRTIO_CONFIG_S_FEATURES_OK)) {
		SPDK_ERRLOG("failed to set FEATURES_OK status!\n");
		/* either the device failed, or we offered some features that
		 * depend on other, not offered features.
		 */
		return -EINVAL;
	}

	return 0;
}

int
virtio_dev_construct(struct virtio_dev *vdev, const char *name,
		     const struct virtio_dev_ops *ops, void *ctx)
{
	int rc;

	vdev->name = strdup(name);
	if (vdev->name == NULL) {
		return -ENOMEM;
	}

	rc = pthread_mutex_init(&vdev->mutex, NULL);
	if (rc != 0) {
		free(vdev->name);
		return -rc;
	}

	vdev->backend_ops = ops;
	vdev->ctx = ctx;

	return 0;
}

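/*
 * First half of the virtio initialization handshake: reset the device, then
 * set the ACKNOWLEDGE and DRIVER status bits and negotiate features (which
 * also sets FEATURES_OK). The caller completes the handshake with
 * virtio_dev_start(), which sets DRIVER_OK.
 */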
int
virtio_dev_reset(struct virtio_dev *dev, uint64_t req_features)
{
	req_features |= (1ULL << VIRTIO_F_VERSION_1);

	virtio_dev_stop(dev);

	virtio_dev_set_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
	if (!(virtio_dev_get_status(dev) & VIRTIO_CONFIG_S_ACKNOWLEDGE)) {
		SPDK_ERRLOG("Failed to set VIRTIO_CONFIG_S_ACKNOWLEDGE status.\n");
		return -EIO;
	}

	virtio_dev_set_status(dev, VIRTIO_CONFIG_S_DRIVER);
	if (!(virtio_dev_get_status(dev) & VIRTIO_CONFIG_S_DRIVER)) {
		SPDK_ERRLOG("Failed to set VIRTIO_CONFIG_S_DRIVER status.\n");
		return -EIO;
	}

	return virtio_negotiate_features(dev, req_features);
}

int
virtio_dev_start(struct virtio_dev *vdev, uint16_t max_queues, uint16_t fixed_queue_num)
{
	int ret;

	ret = virtio_alloc_queues(vdev, max_queues, fixed_queue_num);
	if (ret < 0) {
		return ret;
	}

	virtio_dev_set_status(vdev, VIRTIO_CONFIG_S_DRIVER_OK);
	if (!(virtio_dev_get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK)) {
		SPDK_ERRLOG("Failed to set VIRTIO_CONFIG_S_DRIVER_OK status.\n");
		return -1;
	}

	return 0;
}

void
virtio_dev_destruct(struct virtio_dev *dev)
{
	virtio_dev_backend_ops(dev)->destruct_dev(dev);
	pthread_mutex_destroy(&dev->mutex);
	free(dev->name);
}

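/*
 * Return the descriptor chain starting at desc_idx to the free list and
 * credit vq_free_cnt with the number of descriptors that were recorded for
 * this request.
 */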
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp, *dp_tail;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx_last = desc_idx;

	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			desc_idx_last = dp->next;
			dp = &vq->vq_ring.desc[dp->next];
		}
	}
	dxp->ndescs = 0;

	/*
	 * Append the newly freed chain to the end of the existing free chain.
	 * If the virtqueue was completely used, the free list is empty and
	 * vq_desc_tail_idx is VQ_RING_DESC_CHAIN_END, so the freed chain
	 * becomes the new free list.
	 */
	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
		vq->vq_desc_head_idx = desc_idx;
	} else {
		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
		dp_tail->next = desc_idx;
	}

	vq->vq_desc_tail_idx = desc_idx_last;
	dp->next = VQ_RING_DESC_CHAIN_END;
}

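/*
 * Pop up to num completed requests from the used ring. For each entry,
 * return the cookie that was associated with the request when it was
 * started and recycle its descriptor chain.
 */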
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, void **rx_pkts,
			   uint32_t *len, uint16_t num)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;
	uint16_t i;

	/* Caller does the check */
	for (i = 0; i < num; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		desc_idx = (uint16_t) uep->id;
		len[i] = uep->len;
		cookie = vq->vq_descx[desc_idx].cookie;

		if (spdk_unlikely(cookie == NULL)) {
			SPDK_WARNLOG("vring descriptor with no mbuf cookie at %"PRIu16"\n",
				     vq->vq_used_cons_idx);
			break;
		}

		__builtin_prefetch(cookie);

		rx_pkts[i] = cookie;
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].cookie = NULL;
	}

	return i;
}

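/*
 * Terminate the descriptor chain of the request that is currently being
 * built (clear the NEXT flag on its last descriptor) and publish the chain
 * head on the available ring.
 */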
static void
finish_req(struct virtqueue *vq)
{
	struct vring_desc *desc;
	uint16_t avail_idx;

	desc = &vq->vq_ring.desc[vq->req_end];
	desc->flags &= ~VRING_DESC_F_NEXT;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_req_flush() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
	vq->vq_ring.avail->ring[avail_idx] = vq->req_start;
	vq->vq_avail_idx++;
	vq->req_end = VQ_RING_DESC_CHAIN_END;
	virtio_wmb();
	vq->vq_ring.avail->idx = vq->vq_avail_idx;
	vq->reqs_finished++;
}

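/*
 * Requests are built in three steps: virtqueue_req_start() claims the head
 * of the free descriptor chain and associates a caller cookie with it,
 * virtqueue_req_add_iovs() appends one descriptor per (possibly split)
 * iovec, and virtqueue_req_flush() publishes everything started so far and
 * notifies the device if needed. An illustrative sequence (error handling
 * omitted):
 *
 *	if (virtqueue_req_start(vq, io_ctx, iovcnt) == 0) {
 *		virtqueue_req_add_iovs(vq, iovs, iovcnt, SPDK_VIRTIO_DESC_RO);
 *		virtqueue_req_flush(vq);
 *	}
 */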
int
virtqueue_req_start(struct virtqueue *vq, void *cookie, int iovcnt)
{
	struct vq_desc_extra *dxp;

	/* Reserve enough entries to handle iov split */
	if (2 * iovcnt > vq->vq_free_cnt) {
		return iovcnt > vq->vq_nentries ? -EINVAL : -ENOMEM;
	}

	if (vq->req_end != VQ_RING_DESC_CHAIN_END) {
		finish_req(vq);
	}

	vq->req_start = vq->vq_desc_head_idx;
	dxp = &vq->vq_descx[vq->req_start];
	dxp->cookie = cookie;
	dxp->ndescs = 0;

	return 0;
}

void
virtqueue_req_flush(struct virtqueue *vq)
{
	uint16_t reqs_finished;

	if (vq->req_end == VQ_RING_DESC_CHAIN_END) {
		/* no non-empty requests have been started */
		return;
	}

	finish_req(vq);
	virtio_mb();

	reqs_finished = vq->reqs_finished;
	vq->reqs_finished = 0;

	if (vq->vdev->negotiated_features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		/* Set used event idx to a value the device will never reach.
		 * This effectively disables interrupts.
		 */
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx - vq->vq_nentries - 1;

		if (!vring_need_event(vring_avail_event(&vq->vq_ring),
				      vq->vq_avail_idx,
				      vq->vq_avail_idx - reqs_finished)) {
			return;
		}
	} else if (vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) {
		return;
	}

	virtio_dev_backend_ops(vq->vdev)->notify_queue(vq->vdev, vq);
	SPDK_DEBUGLOG(virtio_dev, "Notified backend after xmit\n");
}

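/*
 * Drop the request that is currently being built and return all of its
 * descriptors to the free chain. Nothing is published to the device.
 */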
void
virtqueue_req_abort(struct virtqueue *vq)
{
	struct vring_desc *desc;

	if (vq->req_start == VQ_RING_DESC_CHAIN_END) {
		/* no requests have been started */
		return;
	}

	desc = &vq->vq_ring.desc[vq->req_end];
	desc->flags &= ~VRING_DESC_F_NEXT;

	vq_ring_free_chain(vq, vq->req_start);
	vq->req_start = VQ_RING_DESC_CHAIN_END;
}

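/*
 * Append one descriptor per physically contiguous chunk of each iovec to
 * the request started with virtqueue_req_start(). For hardware devices an
 * iovec may be split when spdk_vtophys() reports a shorter contiguous
 * mapping, which is why virtqueue_req_start() reserves up to two
 * descriptors per iovec.
 */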
void
virtqueue_req_add_iovs(struct virtqueue *vq, struct iovec *iovs, uint16_t iovcnt,
		       enum spdk_virtio_desc_type desc_type)
{
	struct vring_desc *desc;
	struct vq_desc_extra *dxp;
	uint16_t i, prev_head, new_head;
	uint64_t processed_length, iovec_length, current_length;
	void *current_base;
	uint16_t used_desc_count = 0;

	assert(vq->req_start != VQ_RING_DESC_CHAIN_END);
	assert(iovcnt <= vq->vq_free_cnt);

	/* TODO use indirect descriptors if iovcnt is high enough
	 * or the caller specifies SPDK_VIRTIO_DESC_F_INDIRECT
	 */

	prev_head = vq->req_end;
	new_head = vq->vq_desc_head_idx;
	for (i = 0; i < iovcnt; ++i) {
		processed_length = 0;
		iovec_length = iovs[i].iov_len;
		current_base = iovs[i].iov_base;

		while (processed_length < iovec_length) {
			desc = &vq->vq_ring.desc[new_head];
			current_length = iovec_length - processed_length;

			if (!vq->vdev->is_hw) {
				desc->addr = (uintptr_t)current_base;
			} else {
				desc->addr = spdk_vtophys(current_base, &current_length);
			}

			desc->len = current_length;
			/* always set NEXT flag. unset it on the last descriptor
			 * in the request-ending function.
			 */
			desc->flags = desc_type | VRING_DESC_F_NEXT;

			prev_head = new_head;
			new_head = desc->next;
			used_desc_count++;

			processed_length += current_length;
			current_base += current_length;
		}
	}

	dxp = &vq->vq_descx[vq->req_start];
	dxp->ndescs += used_desc_count;

	vq->req_end = prev_head;
	vq->vq_desc_head_idx = new_head;
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - used_desc_count);
	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
		assert(vq->vq_free_cnt == 0);
		vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
	}
}

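/*
 * Number of vring descriptors that fit in one cache line. virtio_recv_pkts()
 * trims large batches so that vq_used_cons_idx ends up a multiple of this
 * value, presumably to keep consecutive calls working on cache-line-aligned
 * chunks of the ring.
 */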
#define DESC_PER_CACHELINE (SPDK_CACHE_LINE_SIZE / sizeof(struct vring_desc))
uint16_t
virtio_recv_pkts(struct virtqueue *vq, void **io, uint32_t *len, uint16_t nb_pkts)
{
	uint16_t nb_used, num;

	nb_used = vq->vq_ring.used->idx - vq->vq_used_cons_idx;
	virtio_rmb();

	num = (uint16_t)(spdk_likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
	if (spdk_likely(num > DESC_PER_CACHELINE)) {
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
	}

	return virtqueue_dequeue_burst_rx(vq, io, len, num);
}

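/*
 * Virtqueues are single-consumer: an SPDK thread must acquire a queue before
 * using it, and only that thread may process the queue until it calls
 * virtio_dev_release_queue().
 */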
int
virtio_dev_acquire_queue(struct virtio_dev *vdev, uint16_t index)
{
	struct virtqueue *vq = NULL;

	if (index >= vdev->max_queues) {
		SPDK_ERRLOG("requested vq index %"PRIu16" exceeds max queue count %"PRIu16".\n",
			    index, vdev->max_queues);
		return -1;
	}

	pthread_mutex_lock(&vdev->mutex);
	vq = vdev->vqs[index];
	if (vq == NULL || vq->owner_thread != NULL) {
		pthread_mutex_unlock(&vdev->mutex);
		return -1;
	}

	vq->owner_thread = spdk_get_thread();
	pthread_mutex_unlock(&vdev->mutex);
	return 0;
}

int32_t
virtio_dev_find_and_acquire_queue(struct virtio_dev *vdev, uint16_t start_index)
{
	struct virtqueue *vq = NULL;
	uint16_t i;

	pthread_mutex_lock(&vdev->mutex);
	for (i = start_index; i < vdev->max_queues; ++i) {
		vq = vdev->vqs[i];
		if (vq != NULL && vq->owner_thread == NULL) {
			break;
		}
	}

	if (vq == NULL || i == vdev->max_queues) {
		SPDK_ERRLOG("no more unused virtio queues with idx >= %"PRIu16".\n", start_index);
		pthread_mutex_unlock(&vdev->mutex);
		return -1;
	}

	vq->owner_thread = spdk_get_thread();
	pthread_mutex_unlock(&vdev->mutex);
	return i;
}

struct spdk_thread *
virtio_dev_queue_get_thread(struct virtio_dev *vdev, uint16_t index)
{
	struct spdk_thread *thread = NULL;

	if (index >= vdev->max_queues) {
		SPDK_ERRLOG("given vq index %"PRIu16" exceeds max queue count %"PRIu16"\n",
			    index, vdev->max_queues);
		abort(); /* This is not recoverable */
	}

	pthread_mutex_lock(&vdev->mutex);
	thread = vdev->vqs[index]->owner_thread;
	pthread_mutex_unlock(&vdev->mutex);

	return thread;
}

bool
virtio_dev_queue_is_acquired(struct virtio_dev *vdev, uint16_t index)
{
	return virtio_dev_queue_get_thread(vdev, index) != NULL;
}

void
virtio_dev_release_queue(struct virtio_dev *vdev, uint16_t index)
{
	struct virtqueue *vq = NULL;

	if (index >= vdev->max_queues) {
		SPDK_ERRLOG("given vq index %"PRIu16" exceeds max queue count %"PRIu16".\n",
			    index, vdev->max_queues);
		return;
	}

	pthread_mutex_lock(&vdev->mutex);
	vq = vdev->vqs[index];
	if (vq == NULL) {
		SPDK_ERRLOG("virtqueue at index %"PRIu16" is not initialized.\n", index);
		pthread_mutex_unlock(&vdev->mutex);
		return;
	}

	assert(vq->owner_thread == spdk_get_thread());
	vq->owner_thread = NULL;
	pthread_mutex_unlock(&vdev->mutex);
}

int
virtio_dev_read_dev_config(struct virtio_dev *dev, size_t offset,
			   void *dst, int length)
{
	return virtio_dev_backend_ops(dev)->read_dev_cfg(dev, offset, dst, length);
}

int
virtio_dev_write_dev_config(struct virtio_dev *dev, size_t offset,
			    const void *src, int length)
{
	return virtio_dev_backend_ops(dev)->write_dev_cfg(dev, offset, src, length);
}

void
virtio_dev_stop(struct virtio_dev *dev)
{
	virtio_dev_backend_ops(dev)->set_status(dev, VIRTIO_CONFIG_S_RESET);
	/* flush status write */
	virtio_dev_backend_ops(dev)->get_status(dev);
	virtio_free_queues(dev);
}

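/*
 * Device status bits are cumulative: except for a full reset, the new bit is
 * OR-ed into the current device status before being written back.
 */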
void
virtio_dev_set_status(struct virtio_dev *dev, uint8_t status)
{
	if (status != VIRTIO_CONFIG_S_RESET) {
		status |= virtio_dev_backend_ops(dev)->get_status(dev);
	}

	virtio_dev_backend_ops(dev)->set_status(dev, status);
}

uint8_t
virtio_dev_get_status(struct virtio_dev *dev)
{
	return virtio_dev_backend_ops(dev)->get_status(dev);
}

const struct virtio_dev_ops *
virtio_dev_backend_ops(struct virtio_dev *dev)
{
	return dev->backend_ops;
}

void
virtio_dev_dump_json_info(struct virtio_dev *hw, struct spdk_json_write_ctx *w)
{
	spdk_json_write_named_object_begin(w, "virtio");

	spdk_json_write_named_uint32(w, "vq_count", hw->max_queues);

	spdk_json_write_named_uint32(w, "vq_size",
				     virtio_dev_backend_ops(hw)->get_queue_size(hw, 0));

	virtio_dev_backend_ops(hw)->dump_json_info(hw, w);

	spdk_json_write_object_end(w);
}

SPDK_LOG_REGISTER_COMPONENT(virtio_dev)