Lines Matching defs:vq

113  * Later, a handler may need to ensure the vq has been locked (for example,
121 #define VHOST_USER_ASSERT_LOCK(dev, vq, id) do { \
124 vq_assert_lock(dev, vq); \
305 vhost_user_notify_queue_state(struct virtio_net *dev, struct vhost_virtqueue *vq,
311 if (enable && vq->notif_enable != VIRTIO_UNINITIALIZED_NOTIF)
312 vhost_enable_guest_notification(dev, vq, vq->notif_enable);
315 vdpa_dev->ops->set_vring_state(dev->vid, vq->index, enable);
318 dev->notify_ops->vring_state_changed(dev->vid, vq->index, enable);
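
The fragments at lines 305-318 show vhost_user_notify_queue_state() fanning a ring state change out to three consumers. A minimal sketch of that fan-out follows; the struct layouts, callback names, and SKETCH_UNINITIALIZED_NOTIF constant are simplified stand-ins for illustration, not the real rte_vhost definitions.

    #include <stddef.h>

    #define SKETCH_UNINITIALIZED_NOTIF (-1)   /* stand-in for VIRTIO_UNINITIALIZED_NOTIF */

    struct sketch_vq {
        int index;
        int notif_enable;   /* last notification setting requested by the app */
    };

    struct sketch_dev {
        int vid;
        /* Internal helper, vhost_enable_guest_notification() in the real code. */
        void (*enable_guest_notification)(struct sketch_vq *vq, int enable);
        /* vDPA driver hook; NULL when no vDPA device is attached. */
        int (*vdpa_set_vring_state)(int vid, int index, int enable);
        /* Application callback (dev->notify_ops->vring_state_changed). */
        void (*app_vring_state_changed)(int vid, int index, int enable);
    };

    static void
    notify_queue_state(struct sketch_dev *dev, struct sketch_vq *vq, int enable)
    {
        /* 1. On enable, re-apply the guest-notification setting the
         *    application last asked for. */
        if (enable && vq->notif_enable != SKETCH_UNINITIALIZED_NOTIF &&
            dev->enable_guest_notification != NULL)
            dev->enable_guest_notification(vq, vq->notif_enable);

        /* 2. Forward the state change to an attached vDPA driver, if any. */
        if (dev->vdpa_set_vring_state != NULL)
            dev->vdpa_set_vring_state(dev->vid, vq->index, enable);

        /* 3. Finally tell the application that the ring changed state. */
        if (dev->app_vring_state_changed != NULL)
            dev->app_vring_state_changed(dev->vid, vq->index, enable);
    }
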
453 struct vhost_virtqueue *vq;
455 vq = dev->virtqueue[--dev->nr_vring];
456 if (!vq)
460 cleanup_vq(vq, 1);
461 cleanup_vq_inflight(dev, vq);
463 VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_FEATURES);
464 rte_rwlock_write_unlock(&vq->access_lock);
465 free_vq(dev, vq);
486 struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index];
495 vq->size = ctx->msg.payload.state.num;
507 if (vq->size & (vq->size - 1)) {
510 vq->size);
516 rte_free(vq->shadow_used_packed);
517 vq->shadow_used_packed = rte_malloc_socket(NULL,
518 vq->size *
520 RTE_CACHE_LINE_SIZE, vq->numa_node);
521 if (!vq->shadow_used_packed) {
528 rte_free(vq->shadow_used_split);
530 vq->shadow_used_split = rte_malloc_socket(NULL,
531 vq->size * sizeof(struct vring_used_elem),
532 RTE_CACHE_LINE_SIZE, vq->numa_node);
534 if (!vq->shadow_used_split) {
536 "failed to allocate memory for vq internal data.");
541 rte_free(vq->batch_copy_elems);
542 vq->batch_copy_elems = rte_malloc_socket(NULL,
543 vq->size * sizeof(struct batch_copy_elem),
544 RTE_CACHE_LINE_SIZE, vq->numa_node);
545 if (!vq->batch_copy_elems) {
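
Lines 516-545 show the vring-num handler resizing the per-virtqueue side buffers (shadow used ring, batch-copy array): free the old buffer, then allocate a new one sized by vq->size on vq->numa_node, cache-line aligned. A small sketch of that pattern, assuming DPDK's rte_malloc API (the element type is a stand-in):

    #include <stdint.h>
    #include <rte_common.h>
    #include <rte_malloc.h>

    /* Stand-in element; the real buffers hold struct vring_used_elem or
     * struct batch_copy_elem entries. */
    struct ring_elem_sketch { uint32_t id; uint32_t len; };

    static void *
    realloc_ring_buffer(void *old, uint32_t ring_size, int numa_node)
    {
        /* rte_free(NULL) is a no-op, so this also covers the first call. */
        rte_free(old);
        return rte_malloc_socket(NULL,
                ring_size * sizeof(struct ring_elem_sketch),
                RTE_CACHE_LINE_SIZE, numa_node);
    }

Allocating on vq->numa_node keeps these hot buffers on the same socket as the rings they mirror.
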
564 struct vhost_virtqueue *vq;
572 vq = *pvq;
578 if (vq->ready)
581 ret = get_mempolicy(&node, NULL, 0, vq->desc, MPOL_F_NODE | MPOL_F_ADDR);
585 vq->index);
589 if (node == vq->numa_node)
592 vq = rte_realloc_socket(*pvq, sizeof(**pvq), 0, node);
593 if (!vq) {
599 *pvq = vq;
601 if (vq != dev->virtqueue[vq->index]) {
603 dev->virtqueue[vq->index] = vq;
609 sup = rte_realloc_socket(vq->shadow_used_packed, vq->size * sizeof(*sup),
617 vq->shadow_used_packed = sup;
621 sus = rte_realloc_socket(vq->shadow_used_split, vq->size * sizeof(*sus),
629 vq->shadow_used_split = sus;
632 bce = rte_realloc_socket(vq->batch_copy_elems, vq->size * sizeof(*bce),
640 vq->batch_copy_elems = bce;
642 if (vq->log_cache) {
645 lc = rte_realloc_socket(vq->log_cache, sizeof(*lc) * VHOST_LOG_CACHE_NR, 0, node);
652 vq->log_cache = lc;
655 if (vq->resubmit_inflight) {
658 ri = rte_realloc_socket(vq->resubmit_inflight, sizeof(*ri), 0, node);
665 vq->resubmit_inflight = ri;
682 vq->numa_node = node;
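
At line 581, numa_realloc() asks the kernel which NUMA node backs the guest-mapped descriptor ring so the virtqueue metadata can be migrated next to it. A self-contained sketch of that query, assuming libnuma's <numaif.h> wrapper (link with -lnuma):

    #include <stdio.h>
    #include <numaif.h>

    static int
    node_of_addr(void *addr)
    {
        int node = -1;

        /* MPOL_F_NODE | MPOL_F_ADDR returns the node of the page backing addr. */
        if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) < 0) {
            perror("get_mempolicy");
            return -1;
        }
        return node;
    }

When the reported node differs from vq->numa_node, the listing shows the virtqueue and each of its side buffers (shadow rings, batch-copy array, log cache, resubmit list) being moved with rte_realloc_socket() and dev->virtqueue[vq->index] repointed to the new allocation.
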
777 ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
783 vhost_user_iotlb_rd_lock(vq);
784 vva = vhost_iova_to_vva(dev, vq, ra,
786 vhost_user_iotlb_rd_unlock(vq);
795 log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq)
799 vhost_user_iotlb_rd_lock(vq);
800 log_gpa = translate_log_addr(dev, vq, vq->ring_addrs.log_guest_addr);
801 vhost_user_iotlb_rd_unlock(vq);
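
Lines 783-801 show ring_addr_to_vva() and log_addr_to_gpa() wrapping every address translation in the per-virtqueue IOTLB read lock. A minimal sketch of that pattern, using a plain rte_rwlock and a lookup callback as stand-ins for the internal IOTLB helpers:

    #include <stdint.h>
    #include <rte_rwlock.h>

    struct xlate_ctx_sketch {
        rte_rwlock_t iotlb_lock;   /* stand-in for the per-vq IOTLB lock */
    };

    static uint64_t
    translate_locked(struct xlate_ctx_sketch *ctx, uint64_t iova, uint64_t *len,
                     uint64_t (*lookup)(struct xlate_ctx_sketch *, uint64_t, uint64_t *))
    {
        uint64_t vva;

        /* Hold the IOTLB read lock across the lookup so a concurrent
         * invalidation cannot pull entries out from under the translation. */
        rte_rwlock_read_lock(&ctx->iotlb_lock);
        vva = lookup(ctx, iova, len);   /* vhost_iova_to_vva() in the real code */
        rte_rwlock_read_unlock(&ctx->iotlb_lock);

        return vva;
    }
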
843 struct vhost_virtqueue *vq;
848 vq = *pvq;
850 vq_assert_lock(dev, vq);
852 if (vq->ring_addrs.flags & (1 << VHOST_VRING_F_LOG)) {
853 vq->log_guest_addr =
854 log_addr_to_gpa(dev, vq);
855 if (vq->log_guest_addr == 0) {
862 len = sizeof(struct vring_packed_desc) * vq->size;
863 vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
864 ring_addr_to_vva(dev, vq, vq->ring_addrs.desc_user_addr, &len);
865 if (vq->desc_packed == NULL ||
867 vq->size) {
872 mem_set_dump(dev, vq->desc_packed, len, true,
873 hua_to_alignment(dev->mem, vq->desc_packed));
874 numa_realloc(&dev, &vq);
876 *pvq = vq;
879 vq->driver_event = (struct vring_packed_desc_event *)
881 vq, vq->ring_addrs.avail_user_addr, &len);
882 if (vq->driver_event == NULL ||
889 mem_set_dump(dev, vq->driver_event, len, true,
890 hua_to_alignment(dev->mem, vq->driver_event));
892 vq->device_event = (struct vring_packed_desc_event *)
894 vq, vq->ring_addrs.used_user_addr, &len);
895 if (vq->device_event == NULL ||
902 mem_set_dump(dev, vq->device_event, len, true,
903 hua_to_alignment(dev->mem, vq->device_event));
904 vq->access_ok = true;
909 if (vq->desc && vq->avail && vq->used)
912 len = sizeof(struct vring_desc) * vq->size;
913 vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
914 vq, vq->ring_addrs.desc_user_addr, &len);
915 if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
920 mem_set_dump(dev, vq->desc, len, true, hua_to_alignment(dev->mem, vq->desc));
921 numa_realloc(&dev, &vq);
923 *pvq = vq;
925 len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
929 vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
930 vq, vq->ring_addrs.avail_user_addr, &len);
931 if (vq->avail == 0 || len != expected_len) {
936 mem_set_dump(dev, vq->avail, len, true, hua_to_alignment(dev->mem, vq->avail));
938 sizeof(struct vring_used_elem) * vq->size;
942 vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
943 vq, vq->ring_addrs.used_user_addr, &len);
944 if (vq->used == 0 || len != expected_len) {
949 mem_set_dump(dev, vq->used, len, true, hua_to_alignment(dev->mem, vq->used));
951 if (vq->last_used_idx != vq->used->idx) {
953 "last_used_idx (%u) and vq->used->idx (%u) mismatches;",
954 vq->last_used_idx, vq->used->idx);
955 vq->last_used_idx = vq->used->idx;
956 vq->last_avail_idx = vq->used->idx;
957 vhost_virtqueue_reconnect_log_split(vq);
962 vq->access_ok = true;
964 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "mapped address desc: %p", vq->desc);
965 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "mapped address avail: %p", vq->avail);
966 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "mapped address used: %p", vq->used);
967 VHOST_CONFIG_LOG(dev->ifname, DEBUG, "log_guest_addr: %" PRIx64, vq->log_guest_addr);
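
In translate_ring_addresses() (lines 912-949 for the split ring), each ring address is translated and then validated: the mapping must be non-NULL and the returned length must match the length expected from vq->size, otherwise the ring is left unmapped. A sketch of that translate-and-verify step, with translate() standing in for ring_addr_to_vva():

    #include <stdint.h>
    #include <stddef.h>

    static void *
    map_ring_region(uint64_t guest_addr, uint64_t expected_len,
                    void *(*translate)(uint64_t addr, uint64_t *len))
    {
        uint64_t len = expected_len;
        void *va = translate(guest_addr, &len);

        /* A NULL result or a shortened length means the region is not fully
         * mapped yet (e.g. it spans a hole), so the ring stays invalid. */
        if (va == NULL || len != expected_len)
            return NULL;

        return va;
    }
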
980 struct vhost_virtqueue *vq;
988 vq = dev->virtqueue[ctx->msg.payload.addr.index];
994 memcpy(&vq->ring_addrs, addr, sizeof(*addr));
1000 VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_VRING_ADDR);
1002 access_ok = vq->access_ok;
1004 vring_invalidate(dev, vq);
1006 if ((vq->enabled && (dev->features &
1009 translate_ring_addresses(&dev, &vq);
1026 struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index];
1034 vq->last_avail_idx = val & 0x7fff;
1035 vq->avail_wrap_counter = !!(val & (0x1 << 15));
1041 vq->last_used_idx = vq->last_avail_idx;
1042 vq->used_wrap_counter = vq->avail_wrap_counter;
1043 vhost_virtqueue_reconnect_log_packed(vq);
1045 vq->last_used_idx = ctx->msg.payload.state.num;
1046 vq->last_avail_idx = ctx->msg.payload.state.num;
1047 vhost_virtqueue_reconnect_log_split(vq);
1052 ctx->msg.payload.state.index, vq->last_used_idx, vq->last_avail_idx);
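
Lines 1034-1035 (and the mirror-image encode at 2232-2233) show how packed rings carry their state through the vring base message: bits 0-14 hold the ring index and bit 15 holds the wrap counter. A self-contained sketch of the encode/decode pair:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static uint16_t
    encode_packed_base(uint16_t idx, bool wrap)
    {
        return (uint16_t)((idx & 0x7fff) | ((uint16_t)wrap << 15));
    }

    static void
    decode_packed_base(uint16_t val, uint16_t *idx, bool *wrap)
    {
        *idx = val & 0x7fff;
        *wrap = !!(val & (1 << 15));
    }

    int
    main(void)
    {
        uint16_t idx;
        bool wrap;

        decode_packed_base(encode_packed_base(42, true), &idx, &wrap);
        assert(idx == 42 && wrap);
        return 0;
    }
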
1505 struct vhost_virtqueue *vq = dev->virtqueue[i];
1507 if (!vq)
1510 if (vq->desc || vq->avail || vq->used) {
1512 VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_MEM_TABLE);
1519 vring_invalidate(dev, vq);
1521 translate_ring_addresses(&dev, &vq);
1549 vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
1553 if (!vq)
1557 rings_ok = vq->desc_packed && vq->driver_event &&
1558 vq->device_event;
1560 rings_ok = vq->desc && vq->avail && vq->used;
1563 vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
1564 vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD &&
1565 vq->enabled;
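
Lines 1549-1565 give the readiness predicate: a virtqueue is usable once its rings are mapped, both eventfds have arrived, and the ring is enabled. A sketch of the split-ring case, with simplified stand-in fields and sentinel:

    #include <stdbool.h>

    #define SKETCH_UNINITIALIZED_EVENTFD (-2)  /* stand-in for an fd not yet received */

    struct vq_sketch {
        void *desc, *avail, *used;   /* split-ring mappings */
        int kickfd, callfd;
        bool enabled;
    };

    static bool
    vq_ready_sketch(const struct vq_sketch *vq)
    {
        bool rings_ok = vq->desc && vq->avail && vq->used;

        return rings_ok &&
               vq->kickfd != SKETCH_UNINITIALIZED_EVENTFD &&
               vq->callfd != SKETCH_UNINITIALIZED_EVENTFD &&
               vq->enabled;
    }
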
1575 struct vhost_virtqueue *vq;
1602 vq = dev->virtqueue[i];
1604 if (!vq_is_ready(dev, vq))
1790 struct vhost_virtqueue *vq;
1873 vq = dev->virtqueue[i];
1874 if (!vq)
1877 cleanup_vq_inflight(dev, vq);
1879 vq->inflight_packed = addr;
1880 vq->inflight_packed->desc_num = queue_size;
1882 vq->inflight_split = addr;
1883 vq->inflight_split->desc_num = queue_size;
1898 struct vhost_virtqueue *vq;
1914 vq = dev->virtqueue[file.index];
1916 if (vq->ready) {
1917 vq->ready = false;
1918 vhost_user_notify_queue_state(dev, vq, 0);
1921 if (vq->callfd >= 0)
1922 close(vq->callfd);
1924 vq->callfd = file.fd;
1961 struct vhost_virtqueue *vq)
1965 struct vring_used *used = vq->used;
1976 if ((!vq->inflight_split))
1979 if (!vq->inflight_split->version) {
1980 vq->inflight_split->version = INFLIGHT_VERSION;
1984 if (vq->resubmit_inflight)
1987 inflight_split = vq->inflight_split;
1988 vq->global_counter = 0;
2002 vq->last_avail_idx += resubmit_num;
2003 vhost_virtqueue_reconnect_log_split(vq);
2007 0, vq->numa_node);
2016 0, vq->numa_node);
2025 for (i = 0; i < vq->inflight_split->desc_num; i++) {
2026 if (vq->inflight_split->desc[i].inflight == 1) {
2040 vq->global_counter = resubmit->resubmit_list[0].counter + 1;
2041 vq->resubmit_inflight = resubmit;
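
Lines 2025-2041 show the core of vhost_check_queue_inflights_split(): walk the shared inflight table, gather every descriptor still marked in flight for resubmission, and restart the global counter just past the highest counter seen. A simplified, self-contained sketch of that scan (the real code allocates with rte_malloc_socket() on vq->numa_node and orders the list by counter):

    #include <stdint.h>
    #include <stdlib.h>

    struct inflight_entry_sketch { uint8_t inflight; uint64_t counter; };
    struct resubmit_elem_sketch  { uint16_t index;   uint64_t counter; };

    static int
    collect_resubmit(const struct inflight_entry_sketch *table, uint16_t desc_num,
                     struct resubmit_elem_sketch **out, uint16_t *out_num,
                     uint64_t *next_counter)
    {
        struct resubmit_elem_sketch *list;
        uint64_t max_counter = 0;
        uint16_t i, n = 0;

        list = calloc(desc_num, sizeof(*list));
        if (list == NULL)
            return -1;

        for (i = 0; i < desc_num; i++) {
            if (table[i].inflight != 1)
                continue;
            list[n].index = i;
            list[n].counter = table[i].counter;
            if (table[i].counter > max_counter)
                max_counter = table[i].counter;
            n++;
        }

        if (n == 0) {                  /* nothing was left in flight */
            free(list);
            *out = NULL;
            *out_num = 0;
            return 0;
        }

        *out = list;
        *out_num = n;
        *next_counter = max_counter + 1;   /* vq->global_counter resumes here */
        return 0;
    }
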
2049 struct vhost_virtqueue *vq)
2063 if ((!vq->inflight_packed))
2066 if (!vq->inflight_packed->version) {
2067 vq->inflight_packed->version = INFLIGHT_VERSION;
2071 if (vq->resubmit_inflight)
2074 inflight_packed = vq->inflight_packed;
2075 vq->global_counter = 0;
2103 0, vq->numa_node);
2112 0, vq->numa_node);
2122 if (vq->inflight_packed->desc[i].inflight == 1) {
2136 vq->global_counter = resubmit->resubmit_list[0].counter + 1;
2137 vq->resubmit_inflight = resubmit;
2150 struct vhost_virtqueue *vq;
2167 vq = dev->virtqueue[file.index];
2168 translate_ring_addresses(&dev, &vq);
2177 vq->enabled = true;
2180 if (vq->ready) {
2181 vq->ready = false;
2182 vhost_user_notify_queue_state(dev, vq, 0);
2185 if (vq->kickfd >= 0)
2186 close(vq->kickfd);
2187 vq->kickfd = file.fd;
2190 if (vhost_check_queue_inflights_packed(dev, vq)) {
2192 "failed to inflights for vq: %d",
2197 if (vhost_check_queue_inflights_split(dev, vq)) {
2199 "failed to inflights for vq: %d",
2217 struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index];
2232 val = vq->last_avail_idx & 0x7fff;
2233 val |= vq->avail_wrap_counter << 15;
2236 ctx->msg.payload.state.num = vq->last_avail_idx;
2247 if (vq->kickfd >= 0)
2248 close(vq->kickfd);
2250 vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
2252 if (vq->callfd >= 0)
2253 close(vq->callfd);
2255 vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
2257 vq->signalled_used_valid = false;
2260 rte_free(vq->shadow_used_packed);
2261 vq->shadow_used_packed = NULL;
2263 rte_free(vq->shadow_used_split);
2264 vq->shadow_used_split = NULL;
2267 rte_free(vq->batch_copy_elems);
2268 vq->batch_copy_elems = NULL;
2270 rte_free(vq->log_cache);
2271 vq->log_cache = NULL;
2278 rte_rwlock_write_lock(&vq->access_lock);
2279 vring_invalidate(dev, vq);
2280 memset(&vq->ring_addrs, 0, sizeof(struct vhost_vring_addr));
2281 rte_rwlock_write_unlock(&vq->access_lock);
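
Lines 2247-2255 (and the similar spots in the call/kick handlers at 1921-1924 and 2185-2187) use one small idiom for eventfd teardown: close the descriptor only if it is valid, then park the field on a sentinel so later code never touches a stale fd. A sketch:

    #include <unistd.h>

    #define SKETCH_FD_UNINITIALIZED (-2)   /* stand-in for VIRTIO_UNINITIALIZED_EVENTFD */

    static void
    close_and_reset_fd(int *fd)
    {
        if (*fd >= 0)
            close(*fd);
        *fd = SKETCH_FD_UNINITIALIZED;
    }
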
2296 struct vhost_virtqueue *vq;
2304 vq = dev->virtqueue[index];
2307 VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_VRING_ENABLE);
2308 if (enable && vq->async && vq->async->pkts_inflight_n) {
2315 vq->enabled = enable;
2428 struct vhost_virtqueue *vq = dev->virtqueue[i];
2430 rte_free(vq->log_cache);
2431 vq->log_cache = NULL;
2432 vq->log_cache_nb_elem = 0;
2433 vq->log_cache = rte_malloc_socket("vq log cache",
2435 0, vq->numa_node);
2440 if (!vq->log_cache)
2558 is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
2566 ra = &vq->ring_addrs;
2567 len = sizeof(struct vring_desc) * vq->size;
2571 len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
2576 sizeof(struct vring_used_elem) * vq->size;
2591 is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
2599 ra = &vq->ring_addrs;
2600 len = sizeof(struct vring_packed_desc) * vq->size;
2623 struct vhost_virtqueue *vq,
2627 return is_vring_iotlb_packed(vq, imsg);
2629 return is_vring_iotlb_split(vq, imsg);
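
is_vring_iotlb_split()/_packed() (lines 2558-2629) compute the guest address and length of each ring from vq->ring_addrs and vq->size and test them against the incoming IOTLB message. The listing does not include the comparison itself; the overlap test below is an assumption about its shape, shown only to illustrate the idea:

    #include <stdint.h>
    #include <stdbool.h>

    static bool
    ranges_overlap(uint64_t start_a, uint64_t len_a, uint64_t start_b, uint64_t len_b)
    {
        return start_a < start_b + len_b && start_b < start_a + len_a;
    }

    /* Example: does an IOTLB message touch a split descriptor table? */
    static bool
    msg_hits_desc_ring(uint64_t msg_iova, uint64_t msg_size,
                       uint64_t desc_addr, uint32_t ring_size)
    {
        uint64_t desc_len = (uint64_t)ring_size * 16;  /* sizeof(struct vring_desc) == 16 */

        return ranges_overlap(msg_iova, msg_size, desc_addr, desc_len);
    }
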
2728 struct vhost_virtqueue *vq = dev->virtqueue[i];
2730 if (!vq)
2733 if (is_vring_iotlb(dev, vq, imsg)) {
2734 rte_rwlock_write_lock(&vq->access_lock);
2735 translate_ring_addresses(&dev, &vq);
2737 rte_rwlock_write_unlock(&vq->access_lock);
2745 struct vhost_virtqueue *vq = dev->virtqueue[i];
2747 if (!vq)
2750 if (is_vring_iotlb(dev, vq, imsg)) {
2751 rte_rwlock_write_lock(&vq->access_lock);
2752 vring_invalidate(dev, vq);
2753 rte_rwlock_write_unlock(&vq->access_lock);
3076 struct vhost_virtqueue *vq = dev->virtqueue[i];
3078 if (vq) {
3079 rte_rwlock_write_lock(&vq->access_lock);
3094 struct vhost_virtqueue *vq = dev->virtqueue[i];
3096 if (vq) {
3097 rte_rwlock_write_unlock(&vq->access_lock);
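
Lines 3076-3097 show the lock-all/unlock-all pattern: handlers that must quiesce the whole device take every virtqueue's access_lock for writing in one loop and release them in a matching loop. A sketch using rte_rwlock directly:

    #include <rte_rwlock.h>

    static void
    lock_all_vrings(rte_rwlock_t locks[], unsigned int nr)
    {
        unsigned int i;

        for (i = 0; i < nr; i++)
            rte_rwlock_write_lock(&locks[i]);
    }

    static void
    unlock_all_vrings(rte_rwlock_t locks[], unsigned int nr)
    {
        unsigned int i;

        for (i = 0; i < nr; i++)
            rte_rwlock_write_unlock(&locks[i]);
    }
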
3281 struct vhost_virtqueue *vq = dev->virtqueue[i];
3282 bool cur_ready = vq_is_ready(dev, vq);
3284 if (cur_ready != (vq && vq->ready)) {
3285 vq->ready = cur_ready;
3286 vhost_user_notify_queue_state(dev, vq, cur_ready);
3494 vhost_user_inject_irq(struct virtio_net *dev __rte_unused, struct vhost_virtqueue *vq)
3496 if (vq->callfd < 0)
3499 return eventfd_write(vq->callfd, (eventfd_t)1);
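
Finally, lines 3494-3499 show that injecting an interrupt toward the guest is simply a write of 1 to the call eventfd, skipped when no callfd was supplied. A self-contained sketch:

    #include <sys/eventfd.h>

    static int
    inject_irq_sketch(int callfd)
    {
        if (callfd < 0)
            return -1;
        return eventfd_write(callfd, (eventfd_t)1);
    }
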