/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2010-2016 Intel Corporation.
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/env.h"
#include "spdk/util.h"
#include "spdk/barrier.h"

#include "spdk_internal/virtio.h"

/* We use SMP memory barrier variants as all virtio_pci devices
 * are purely virtual. All MMIO is executed on a CPU core, so
 * there's no need to do full MMIO synchronization.
 */
#define virtio_mb()	spdk_smp_mb()
#define virtio_rmb()	spdk_smp_rmb()
#define virtio_wmb()	spdk_smp_wmb()

/* Chain all the descriptors in the ring with an END */
static inline void
vring_desc_init(struct vring_desc *dp, uint16_t n)
{
	uint16_t i;

	for (i = 0; i < n - 1; i++) {
		dp[i].next = (uint16_t)(i + 1);
	}
	dp[i].next = VQ_RING_DESC_CHAIN_END;
}

static void
virtio_init_vring(struct virtqueue *vq)
{
	int size = vq->vq_nentries;
	struct vring *vr = &vq->vq_ring;
	uint8_t *ring_mem = vq->vq_ring_virt_mem;

	/*
	 * Reinitialise since virtio port might have been stopped and restarted
	 */
	memset(ring_mem, 0, vq->vq_ring_size);
	vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;
	vq->req_start = VQ_RING_DESC_CHAIN_END;
	vq->req_end = VQ_RING_DESC_CHAIN_END;
	vq->reqs_finished = 0;
	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);

	vring_desc_init(vr->desc, size);

	/* Tell the backend not to interrupt us.
	 * If F_EVENT_IDX is negotiated, we will always set an incredibly high
	 * used event idx, so that we will practically never receive an
	 * interrupt. See virtqueue_req_flush().
	 */
	if (vq->vdev->negotiated_features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		vring_used_event(&vq->vq_ring) = UINT16_MAX;
	} else {
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	}
}
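/* Allocate and initialize a single virtqueue.
 * The queue size is read from the backend and must be a non-zero power of two.
 * On success, the queue is registered in dev->vqs[] and its vring is initialized.
 */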
static int
virtio_init_queue(struct virtio_dev *dev, uint16_t vtpci_queue_idx)
{
	unsigned int vq_size, size;
	struct virtqueue *vq;
	int rc;

	SPDK_DEBUGLOG(virtio_dev, "setting up queue: %"PRIu16"\n", vtpci_queue_idx);

	/*
	 * Read the virtqueue size from the Queue Size field.
	 * It is always a power of 2; if it is 0, the virtqueue does not exist.
	 */
	vq_size = virtio_dev_backend_ops(dev)->get_queue_size(dev, vtpci_queue_idx);
	SPDK_DEBUGLOG(virtio_dev, "vq_size: %u\n", vq_size);
	if (vq_size == 0) {
		SPDK_ERRLOG("virtqueue %"PRIu16" does not exist\n", vtpci_queue_idx);
		return -EINVAL;
	}

	if (!spdk_u32_is_pow2(vq_size)) {
		SPDK_ERRLOG("virtqueue %"PRIu16" size (%u) is not a power of 2\n",
			vtpci_queue_idx, vq_size);
		return -EINVAL;
	}

	size = sizeof(*vq) + vq_size * sizeof(struct vq_desc_extra);

	if (posix_memalign((void **)&vq, SPDK_CACHE_LINE_SIZE, size)) {
		SPDK_ERRLOG("can not allocate vq\n");
		return -ENOMEM;
	}
	memset(vq, 0, size);
	dev->vqs[vtpci_queue_idx] = vq;

	vq->vdev = dev;
	vq->vq_queue_index = vtpci_queue_idx;
	vq->vq_nentries = vq_size;

	/*
	 * Reserve a memzone for vring elements
	 */
	size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_ring_size = SPDK_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
	SPDK_DEBUGLOG(virtio_dev, "vring_size: %u, rounded_vring_size: %u\n",
		size, vq->vq_ring_size);

	vq->owner_thread = NULL;

	rc = virtio_dev_backend_ops(dev)->setup_queue(dev, vq);
	if (rc < 0) {
		SPDK_ERRLOG("setup_queue failed\n");
		free(vq);
		dev->vqs[vtpci_queue_idx] = NULL;
		return rc;
	}

	SPDK_DEBUGLOG(virtio_dev, "vq->vq_ring_mem: 0x%" PRIx64 "\n", vq->vq_ring_mem);
	SPDK_DEBUGLOG(virtio_dev, "vq->vq_ring_virt_mem: 0x%" PRIx64 "\n",
		(uint64_t)(uintptr_t)vq->vq_ring_virt_mem);

	virtio_init_vring(vq);
	return 0;
}
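/* Release every virtqueue previously set up for this device.
 * Safe to call when no queues were allocated; invoked from virtio_dev_stop()
 * and from the error path of virtio_alloc_queues().
 */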
static void
virtio_free_queues(struct virtio_dev *dev)
{
	uint16_t nr_vq = dev->max_queues;
	struct virtqueue *vq;
	uint16_t i;

	if (dev->vqs == NULL) {
		return;
	}

	for (i = 0; i < nr_vq; i++) {
		vq = dev->vqs[i];
		if (!vq) {
			continue;
		}

		virtio_dev_backend_ops(dev)->del_queue(dev, vq);

		free(vq);
		dev->vqs[i] = NULL;
	}

	free(dev->vqs);
	dev->vqs = NULL;
}

static int
virtio_alloc_queues(struct virtio_dev *dev, uint16_t max_queues, uint16_t fixed_vq_num)
{
	uint16_t i;
	int ret;

	if (max_queues == 0) {
		/* perfectly fine to have a device with no virtqueues. */
		return 0;
	}

	assert(dev->vqs == NULL);
	dev->vqs = calloc(1, sizeof(struct virtqueue *) * max_queues);
	if (!dev->vqs) {
		SPDK_ERRLOG("failed to allocate %"PRIu16" vqs\n", max_queues);
		return -ENOMEM;
	}

	for (i = 0; i < max_queues; i++) {
		ret = virtio_init_queue(dev, i);
		if (ret < 0) {
			virtio_free_queues(dev);
			return ret;
		}
	}

	dev->max_queues = max_queues;
	dev->fixed_queues_num = fixed_vq_num;
	return 0;
}

/**
 * Negotiate virtio features. For virtio_user this will also set
 * dev->modern flag if VIRTIO_F_VERSION_1 flag is negotiated.
 */
static int
virtio_negotiate_features(struct virtio_dev *dev, uint64_t req_features)
{
	uint64_t host_features = virtio_dev_backend_ops(dev)->get_features(dev);
	int rc;

	SPDK_DEBUGLOG(virtio_dev, "guest features = %" PRIx64 "\n", req_features);
	SPDK_DEBUGLOG(virtio_dev, "device features = %" PRIx64 "\n", host_features);

	rc = virtio_dev_backend_ops(dev)->set_features(dev, req_features & host_features);
	if (rc != 0) {
		SPDK_ERRLOG("failed to negotiate device features.\n");
		return rc;
	}

	SPDK_DEBUGLOG(virtio_dev, "negotiated features = %" PRIx64 "\n",
		dev->negotiated_features);

	virtio_dev_set_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
	if (!(virtio_dev_get_status(dev) & VIRTIO_CONFIG_S_FEATURES_OK)) {
		SPDK_ERRLOG("failed to set FEATURES_OK status!\n");
		/* either the device failed, or we offered some features that
		 * depend on other, not offered features.
		 */
		return -EINVAL;
	}

	return 0;
}

int
virtio_dev_construct(struct virtio_dev *vdev, const char *name,
		const struct virtio_dev_ops *ops, void *ctx)
{
	int rc;

	vdev->name = strdup(name);
	if (vdev->name == NULL) {
		return -ENOMEM;
	}

	rc = pthread_mutex_init(&vdev->mutex, NULL);
	if (rc != 0) {
		free(vdev->name);
		return -rc;
	}

	vdev->backend_ops = ops;
	vdev->ctx = ctx;

	return 0;
}
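/* Typical device bring-up with this API (illustrative sketch only; error
 * handling omitted, and the name, ops, ctx and feature mask below are
 * caller-specific placeholders):
 *
 *	virtio_dev_construct(vdev, "my_dev", &my_backend_ops, my_ctx);
 *	virtio_dev_reset(vdev, my_requested_features);
 *	virtio_dev_start(vdev, max_queues, fixed_queue_num);
 *
 * virtio_dev_reset() drives the ACKNOWLEDGE/DRIVER status handshake and
 * feature negotiation (always requesting VIRTIO_F_VERSION_1);
 * virtio_dev_start() allocates the virtqueues and sets DRIVER_OK.
 */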
int
virtio_dev_reset(struct virtio_dev *dev, uint64_t req_features)
{
	req_features |= (1ULL << VIRTIO_F_VERSION_1);

	virtio_dev_stop(dev);

	virtio_dev_set_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
	if (!(virtio_dev_get_status(dev) & VIRTIO_CONFIG_S_ACKNOWLEDGE)) {
		SPDK_ERRLOG("Failed to set VIRTIO_CONFIG_S_ACKNOWLEDGE status.\n");
		return -EIO;
	}

	virtio_dev_set_status(dev, VIRTIO_CONFIG_S_DRIVER);
	if (!(virtio_dev_get_status(dev) & VIRTIO_CONFIG_S_DRIVER)) {
		SPDK_ERRLOG("Failed to set VIRTIO_CONFIG_S_DRIVER status.\n");
		return -EIO;
	}

	return virtio_negotiate_features(dev, req_features);
}

int
virtio_dev_start(struct virtio_dev *vdev, uint16_t max_queues, uint16_t fixed_queue_num)
{
	int ret;

	ret = virtio_alloc_queues(vdev, max_queues, fixed_queue_num);
	if (ret < 0) {
		return ret;
	}

	virtio_dev_set_status(vdev, VIRTIO_CONFIG_S_DRIVER_OK);
	if (!(virtio_dev_get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK)) {
		SPDK_ERRLOG("Failed to set VIRTIO_CONFIG_S_DRIVER_OK status.\n");
		return -1;
	}

	return 0;
}

void
virtio_dev_destruct(struct virtio_dev *dev)
{
	virtio_dev_backend_ops(dev)->destruct_dev(dev);
	pthread_mutex_destroy(&dev->mutex);
	free(dev->name);
}

static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp, *dp_tail;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx_last = desc_idx;

	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			desc_idx_last = dp->next;
			dp = &vq->vq_ring.desc[dp->next];
		}
	}
	dxp->ndescs = 0;

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
		vq->vq_desc_head_idx = desc_idx;
	} else {
		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
		dp_tail->next = desc_idx;
	}

	vq->vq_desc_tail_idx = desc_idx_last;
	dp->next = VQ_RING_DESC_CHAIN_END;
}

static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, void **rx_pkts,
		uint32_t *len, uint16_t num)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;
	uint16_t i;

	/* Caller does the check */
	for (i = 0; i < num; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		desc_idx = (uint16_t)uep->id;
		len[i] = uep->len;
		cookie = vq->vq_descx[desc_idx].cookie;

		if (spdk_unlikely(cookie == NULL)) {
			SPDK_WARNLOG("vring descriptor with no mbuf cookie at %"PRIu16"\n",
				vq->vq_used_cons_idx);
			break;
		}

		__builtin_prefetch(cookie);

		rx_pkts[i] = cookie;
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].cookie = NULL;
	}

	return i;
}

static void
finish_req(struct virtqueue *vq)
{
	struct vring_desc *desc;
	uint16_t avail_idx;

	desc = &vq->vq_ring.desc[vq->req_end];
	desc->flags &= ~VRING_DESC_F_NEXT;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_req_flush() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
	vq->vq_ring.avail->ring[avail_idx] = vq->req_start;
	vq->vq_avail_idx++;
	vq->req_end = VQ_RING_DESC_CHAIN_END;
	virtio_wmb();
	vq->vq_ring.avail->idx = vq->vq_avail_idx;
	vq->reqs_finished++;
}
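/* Descriptor chains are built with the three calls below. An illustrative
 * enqueue of one request with a readable and a writable iovec might look
 * roughly like this (io_ctx, out_iov and in_iov are caller-provided
 * placeholders; the desc_type values are those defined in
 * spdk_internal/virtio.h):
 *
 *	if (virtqueue_req_start(vq, io_ctx, 2) == 0) {
 *		virtqueue_req_add_iovs(vq, &out_iov, 1, SPDK_VIRTIO_DESC_RO);
 *		virtqueue_req_add_iovs(vq, &in_iov, 1, SPDK_VIRTIO_DESC_WR);
 *		virtqueue_req_flush(vq);
 *	}
 *
 * The cookie passed to virtqueue_req_start() is handed back by
 * virtio_recv_pkts() once the request completes; virtqueue_req_abort()
 * releases a partially built chain instead of flushing it.
 */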
int
virtqueue_req_start(struct virtqueue *vq, void *cookie, int iovcnt)
{
	struct vq_desc_extra *dxp;

	if (iovcnt > vq->vq_free_cnt) {
		return iovcnt > vq->vq_nentries ? -EINVAL : -ENOMEM;
	}

	if (vq->req_end != VQ_RING_DESC_CHAIN_END) {
		finish_req(vq);
	}

	vq->req_start = vq->vq_desc_head_idx;
	dxp = &vq->vq_descx[vq->req_start];
	dxp->cookie = cookie;
	dxp->ndescs = 0;

	return 0;
}

void
virtqueue_req_flush(struct virtqueue *vq)
{
	uint16_t reqs_finished;

	if (vq->req_end == VQ_RING_DESC_CHAIN_END) {
		/* no non-empty requests have been started */
		return;
	}

	finish_req(vq);
	virtio_mb();

	reqs_finished = vq->reqs_finished;
	vq->reqs_finished = 0;

	if (vq->vdev->negotiated_features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		/* Set used event idx to a value the device will never reach.
		 * This effectively disables interrupts.
		 */
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx - vq->vq_nentries - 1;

		if (!vring_need_event(vring_avail_event(&vq->vq_ring),
				vq->vq_avail_idx,
				vq->vq_avail_idx - reqs_finished)) {
			return;
		}
	} else if (vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) {
		return;
	}

	virtio_dev_backend_ops(vq->vdev)->notify_queue(vq->vdev, vq);
	SPDK_DEBUGLOG(virtio_dev, "Notified backend after xmit\n");
}

void
virtqueue_req_abort(struct virtqueue *vq)
{
	struct vring_desc *desc;

	if (vq->req_start == VQ_RING_DESC_CHAIN_END) {
		/* no requests have been started */
		return;
	}

	desc = &vq->vq_ring.desc[vq->req_end];
	desc->flags &= ~VRING_DESC_F_NEXT;

	vq_ring_free_chain(vq, vq->req_start);
	vq->req_start = VQ_RING_DESC_CHAIN_END;
}

void
virtqueue_req_add_iovs(struct virtqueue *vq, struct iovec *iovs, uint16_t iovcnt,
		enum spdk_virtio_desc_type desc_type)
{
	struct vring_desc *desc;
	struct vq_desc_extra *dxp;
	uint16_t i, prev_head, new_head;

	assert(vq->req_start != VQ_RING_DESC_CHAIN_END);
	assert(iovcnt <= vq->vq_free_cnt);

	/* TODO use indirect descriptors if iovcnt is high enough
	 * or the caller specifies SPDK_VIRTIO_DESC_F_INDIRECT
	 */

	prev_head = vq->req_end;
	new_head = vq->vq_desc_head_idx;
	for (i = 0; i < iovcnt; ++i) {
		desc = &vq->vq_ring.desc[new_head];

		if (!vq->vdev->is_hw) {
			desc->addr = (uintptr_t)iovs[i].iov_base;
		} else {
			desc->addr = spdk_vtophys(iovs[i].iov_base, NULL);
		}

		desc->len = iovs[i].iov_len;
		/* always set NEXT flag. unset it on the last descriptor
		 * in the request-ending function.
		 */
		desc->flags = desc_type | VRING_DESC_F_NEXT;

		prev_head = new_head;
		new_head = desc->next;
	}

	dxp = &vq->vq_descx[vq->req_start];
	dxp->ndescs += iovcnt;

	vq->req_end = prev_head;
	vq->vq_desc_head_idx = new_head;
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - iovcnt);
	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
		assert(vq->vq_free_cnt == 0);
		vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
	}
}
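/* Poll the used ring and return up to nb_pkts completed request cookies in
 * 'io', with the corresponding used lengths in 'len'. When more than a cache
 * line's worth of descriptors is available, the count is trimmed so that
 * consumption stops on a cache-line boundary of the used ring. Returns the
 * number of completions actually dequeued.
 */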
#define DESC_PER_CACHELINE (SPDK_CACHE_LINE_SIZE / sizeof(struct vring_desc))
uint16_t
virtio_recv_pkts(struct virtqueue *vq, void **io, uint32_t *len, uint16_t nb_pkts)
{
	uint16_t nb_used, num;

	nb_used = vq->vq_ring.used->idx - vq->vq_used_cons_idx;
	virtio_rmb();

	num = (uint16_t)(spdk_likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
	if (spdk_likely(num > DESC_PER_CACHELINE)) {
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
	}

	return virtqueue_dequeue_burst_rx(vq, io, len, num);
}

int
virtio_dev_acquire_queue(struct virtio_dev *vdev, uint16_t index)
{
	struct virtqueue *vq = NULL;

	if (index >= vdev->max_queues) {
		SPDK_ERRLOG("requested vq index %"PRIu16" exceeds max queue count %"PRIu16".\n",
			index, vdev->max_queues);
		return -1;
	}

	pthread_mutex_lock(&vdev->mutex);
	vq = vdev->vqs[index];
	if (vq == NULL || vq->owner_thread != NULL) {
		pthread_mutex_unlock(&vdev->mutex);
		return -1;
	}

	vq->owner_thread = spdk_get_thread();
	pthread_mutex_unlock(&vdev->mutex);
	return 0;
}

int32_t
virtio_dev_find_and_acquire_queue(struct virtio_dev *vdev, uint16_t start_index)
{
	struct virtqueue *vq = NULL;
	uint16_t i;

	pthread_mutex_lock(&vdev->mutex);
	for (i = start_index; i < vdev->max_queues; ++i) {
		vq = vdev->vqs[i];
		if (vq != NULL && vq->owner_thread == NULL) {
			break;
		}
	}

	if (vq == NULL || i == vdev->max_queues) {
		SPDK_ERRLOG("no more unused virtio queues with idx >= %"PRIu16".\n", start_index);
		pthread_mutex_unlock(&vdev->mutex);
		return -1;
	}

	vq->owner_thread = spdk_get_thread();
	pthread_mutex_unlock(&vdev->mutex);
	return i;
}

struct spdk_thread *
virtio_dev_queue_get_thread(struct virtio_dev *vdev, uint16_t index)
{
	struct spdk_thread *thread = NULL;

	if (index >= vdev->max_queues) {
		SPDK_ERRLOG("given vq index %"PRIu16" exceeds max queue count %"PRIu16"\n",
			index, vdev->max_queues);
		abort(); /* This is not recoverable */
	}

	pthread_mutex_lock(&vdev->mutex);
	thread = vdev->vqs[index]->owner_thread;
	pthread_mutex_unlock(&vdev->mutex);

	return thread;
}

bool
virtio_dev_queue_is_acquired(struct virtio_dev *vdev, uint16_t index)
{
	return virtio_dev_queue_get_thread(vdev, index) != NULL;
}

void
virtio_dev_release_queue(struct virtio_dev *vdev, uint16_t index)
{
	struct virtqueue *vq = NULL;

	if (index >= vdev->max_queues) {
		SPDK_ERRLOG("given vq index %"PRIu16" exceeds max queue count %"PRIu16".\n",
			index, vdev->max_queues);
		return;
	}

	pthread_mutex_lock(&vdev->mutex);
	vq = vdev->vqs[index];
	if (vq == NULL) {
		SPDK_ERRLOG("virtqueue at index %"PRIu16" is not initialized.\n", index);
		pthread_mutex_unlock(&vdev->mutex);
		return;
	}

	assert(vq->owner_thread == spdk_get_thread());
	vq->owner_thread = NULL;
	pthread_mutex_unlock(&vdev->mutex);
}

int
virtio_dev_read_dev_config(struct virtio_dev *dev, size_t offset,
		void *dst, int length)
{
	return virtio_dev_backend_ops(dev)->read_dev_cfg(dev, offset, dst, length);
}

int
virtio_dev_write_dev_config(struct virtio_dev *dev, size_t offset,
		const void *src, int length)
{
	return virtio_dev_backend_ops(dev)->write_dev_cfg(dev, offset, src, length);
}

void
virtio_dev_stop(struct virtio_dev *dev)
{
	virtio_dev_backend_ops(dev)->set_status(dev, VIRTIO_CONFIG_S_RESET);
	/* flush status write */
	virtio_dev_backend_ops(dev)->get_status(dev);
	virtio_free_queues(dev);
}
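/* Set device status bits. Except for a full reset, the new bits are OR-ed
 * with the status currently reported by the backend, so previously set
 * status bits are preserved.
 */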
void
virtio_dev_set_status(struct virtio_dev *dev, uint8_t status)
{
	if (status != VIRTIO_CONFIG_S_RESET) {
		status |= virtio_dev_backend_ops(dev)->get_status(dev);
	}

	virtio_dev_backend_ops(dev)->set_status(dev, status);
}

uint8_t
virtio_dev_get_status(struct virtio_dev *dev)
{
	return virtio_dev_backend_ops(dev)->get_status(dev);
}

const struct virtio_dev_ops *
virtio_dev_backend_ops(struct virtio_dev *dev)
{
	return dev->backend_ops;
}

void
virtio_dev_dump_json_info(struct virtio_dev *hw, struct spdk_json_write_ctx *w)
{
	spdk_json_write_named_object_begin(w, "virtio");

	spdk_json_write_named_uint32(w, "vq_count", hw->max_queues);

	spdk_json_write_named_uint32(w, "vq_size",
		virtio_dev_backend_ops(hw)->get_queue_size(hw, 0));

	virtio_dev_backend_ops(hw)->dump_json_info(hw, w);

	spdk_json_write_object_end(w);
}

SPDK_LOG_REGISTER_COMPONENT(virtio_dev)