Lines Matching defs:vq
99 typedef int (*vhost_vring_inject_irq_cb)(struct virtio_net *dev, struct vhost_virtqueue *vq);
249 * vq->access_lock cannot protect pkts_cmpl_flag of vring1.
317 /* Protected by vq->access_lock */
352 /* Index of this vq in dev->virtqueue[] */
562 vhost_virtqueue_reconnect_log_split(struct vhost_virtqueue *vq)
564 if (vq->reconnect_log != NULL)
565 vq->reconnect_log->last_avail_idx = vq->last_avail_idx;
569 vhost_virtqueue_reconnect_log_packed(struct vhost_virtqueue *vq)
571 if (vq->reconnect_log != NULL) {
572 vq->reconnect_log->last_avail_idx = vq->last_avail_idx;
573 vq->reconnect_log->avail_wrap_counter = vq->avail_wrap_counter;
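
These two reconnect helpers mirror the ring's consumer position into a shared-memory log so that a restarted backend (e.g. after a VDUSE reconnect) can resume where it left off. A minimal sketch of the expected call pattern, assuming the caller holds vq->access_lock and has just consumed n descriptors (the call site and n are hypothetical):

    /* split ring: advance first, then persist the new position */
    vq->last_avail_idx += n;
    vhost_virtqueue_reconnect_log_split(vq);
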
578 vq_assert_lock__(struct virtio_net *dev, struct vhost_virtqueue *vq, const char *func)
579 __rte_assert_exclusive_lock(&vq->access_lock)
581 if (unlikely(!rte_rwlock_write_is_locked(&vq->access_lock)))
585 #define vq_assert_lock(dev, vq) vq_assert_lock__(dev, vq, __func__)
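
vq_assert_lock() pairs a compile-time clang thread-safety annotation (__rte_assert_exclusive_lock) with a runtime rte_rwlock_write_is_locked() check, so a missing lock is caught both at build time and in debug runs. A hedged sketch of a caller (the helper name is hypothetical):

    static void
    vq_touch_state(struct virtio_net *dev, struct vhost_virtqueue *vq)
        __rte_exclusive_locks_required(&vq->access_lock)
    {
        vq_assert_lock(dev, vq); /* abort early if the caller forgot the lock */
        /* ... vq fields may be modified safely here ... */
    }
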
604 vq_inc_last_used_packed(struct vhost_virtqueue *vq, uint16_t num)
606 vq->last_used_idx += num;
607 if (vq->last_used_idx >= vq->size) {
608 vq->used_wrap_counter ^= 1;
609 vq->last_used_idx -= vq->size;
614 vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
616 vq->last_avail_idx += num;
617 if (vq->last_avail_idx >= vq->size) {
618 vq->avail_wrap_counter ^= 1;
619 vq->last_avail_idx -= vq->size;
621 vhost_virtqueue_reconnect_log_packed(vq);
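
Both increment helpers implement the packed-ring wrap rule: when an index walks past vq->size it wraps back to zero and the matching wrap counter flips, which is how driver and device tell fresh descriptors from stale ones. A self-contained sketch of the same arithmetic, assuming num <= size:

    #include <stdbool.h>
    #include <stdint.h>

    static inline uint16_t
    packed_idx_advance(uint16_t idx, uint16_t num, uint16_t size, bool *wrap)
    {
        idx += num;
        if (idx >= size) {      /* crossed the end of the ring */
            *wrap = !*wrap;     /* flip the wrap counter */
            idx -= size;        /* continue from the start */
        }
        return idx;
    }
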
625 struct vhost_virtqueue *vq,
628 struct vhost_virtqueue *vq,
630 __rte_shared_locks_required(&vq->iotlb_lock);
632 struct vhost_virtqueue *vq);
635 void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
637 __rte_shared_locks_required(&vq->iotlb_lock);
647 vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
650 __vhost_log_cache_sync(dev, vq);
654 vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
658 __vhost_log_cache_write(dev, vq, addr, len);
662 vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
666 if (unlikely(vq->log_guest_addr == 0))
668 __vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset,
674 vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
678 if (unlikely(vq->log_guest_addr == 0))
680 __vhost_log_write(dev, vq->log_guest_addr + offset, len);
685 vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
687 __rte_shared_locks_required(&vq->iotlb_lock)
693 __vhost_log_cache_write_iova(dev, vq, iova, len);
695 __vhost_log_cache_write(dev, vq, iova, len);
699 vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
701 __rte_shared_locks_required(&vq->iotlb_lock)
707 __vhost_log_write_iova(dev, vq, iova, len);
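
The _cache variants batch dirty-page bits in a per-queue cache instead of hitting the shared log on every write, and the _iova variants translate through the IOTLB first when VIRTIO_F_IOMMU_PLATFORM is negotiated. A hedged sketch of the live-migration call pattern, assuming the internal vhost.h is available (the function and its parameters are hypothetical):

    static void
    log_burst(struct virtio_net *dev, struct vhost_virtqueue *vq,
            uint64_t buf_iova, uint32_t copy_len)
        __rte_shared_locks_required(&vq->iotlb_lock)
    {
        /* per copy: mark the written guest pages in the queue's log cache */
        vhost_log_cache_write_iova(dev, vq, buf_iova, copy_len);
        /* per burst: flush the cached bits into the shared dirty log */
        vhost_log_cache_sync(dev, vq);
    }
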
880 void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
881 void cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq);
882 void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);
894 struct vhost_virtqueue *vq, int enable);
905 uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
907 __rte_shared_locks_required(&vq->iotlb_lock);
909 struct vhost_virtqueue *vq,
911 __rte_shared_locks_required(&vq->iotlb_lock);
912 int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
913 __rte_exclusive_locks_required(&vq->access_lock)
914 __rte_shared_locks_required(&vq->iotlb_lock);
915 uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
917 __rte_shared_locks_required(&vq->iotlb_lock);
918 void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
919 __rte_exclusive_locks_required(&vq->access_lock);
922 vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
924 __rte_shared_locks_required(&vq->iotlb_lock)
929 return __vhost_iova_to_vva(dev, vq, iova, len, perm);
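
vhost_iova_to_vva() bypasses the IOTLB entirely when VIRTIO_F_IOMMU_PLATFORM is not negotiated; on a miss it returns 0, and it shrinks *len to the contiguously mapped length, so callers must check both. A hedged sketch of the usual guard (desc_iova and dlen are hypothetical locals):

    uint64_t hlen = dlen;
    uint64_t vva = vhost_iova_to_vva(dev, vq, desc_iova, &hlen, VHOST_ACCESS_RO);

    if (unlikely(vva == 0 || hlen != dlen))
        return -1; /* unmapped, or the mapping is not contiguous */
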
950 vhost_vring_inject_irq(struct virtio_net *dev, struct vhost_virtqueue *vq)
955 if (rte_atomic_compare_exchange_strong_explicit(&vq->irq_pending, &expected, true,
957 if (dev->notify_ops->guest_notify(dev->vid, vq->index)) {
960 &vq->stats.guest_notifications_offloaded,
966 rte_atomic_store_explicit(&vq->irq_pending, false,
969 vq->stats.guest_notifications_suppressed++;
974 if (dev->backend_ops->inject_irq(dev, vq)) {
976 rte_atomic_fetch_add_explicit(&vq->stats.guest_notifications_error,
982 rte_atomic_fetch_add_explicit(&vq->stats.guest_notifications,
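
vhost_vring_inject_irq() first tries to hand the notification to the application's guest_notify callback; the compare-and-swap on vq->irq_pending lets only one caller issue a notification while one is already in flight, and losers are merely counted as suppressed. The gate in isolation (memory orders shown are illustrative):

    bool expected = false;

    /* only the winner of the false -> true race notifies the guest */
    if (rte_atomic_compare_exchange_strong_explicit(&vq->irq_pending,
            &expected, true,
            rte_memory_order_release, rte_memory_order_relaxed)) {
        /* ... notify, then store false to irq_pending once handled ... */
    } else {
        vq->stats.guest_notifications_suppressed++;
    }
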
989 vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
996 uint16_t old = vq->signalled_used;
997 uint16_t new = vq->last_used_idx;
998 bool signalled_used_valid = vq->signalled_used_valid;
1000 vq->signalled_used = new;
1001 vq->signalled_used_valid = true;
1005 __func__, vhost_used_event(vq), old, new);
1007 if (vhost_need_event(vhost_used_event(vq), new, old) ||
1009 vhost_vring_inject_irq(dev, vq);
1012 if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
1013 vhost_vring_inject_irq(dev, vq);
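
The split-ring path implements VIRTIO_RING_F_EVENT_IDX: an interrupt is raised only if last_used_idx has crossed the driver's used_event value since the last signal; without the feature, the legacy VRING_AVAIL_F_NO_INTERRUPT flag is honoured instead. The crossing test is the standard virtio formula, which relies on unsigned 16-bit wrap-around:

    static inline int
    vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
    {
        /* true iff event_idx lies in the half-open window (old, new_idx] */
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
    }
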
1018 vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
1027 if (vq->driver_event->flags !=
1033 old = vq->signalled_used;
1034 new = vq->last_used_idx;
1035 vq->signalled_used = new;
1036 signalled_used_valid = vq->signalled_used_valid;
1037 vq->signalled_used_valid = true;
1039 if (vq->driver_event->flags != VRING_EVENT_F_DESC) {
1040 if (vq->driver_event->flags != VRING_EVENT_F_DISABLE)
1052 off_wrap = vq->driver_event->off_wrap;
1056 old -= vq->size;
1058 if (vq->used_wrap_counter != off_wrap >> 15)
1059 off -= vq->size;
1065 vhost_vring_inject_irq(dev, vq);
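
For packed rings the driver's event suppression area packs an offset and a wrap bit into one 16-bit off_wrap field; the code above rebases old and off by vq->size so that the same window test as the split path works across a wrap boundary. A sketch of the decoding:

    uint16_t off  = off_wrap & 0x7fff; /* event offset within the ring */
    uint16_t wrap = off_wrap >> 15;    /* driver's expected used wrap state */
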