Lines Matching defs:async

116 		 * case, please stop async data-path and check
134 dma_info->pkts_cmpl_flag_addr[copy_idx & ring_mask] = &vq->async->pkts_cmpl_flag[flag_idx];
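
The match at 134 is where submission and completion meet: for every copy handed to the DMA engine, the per-vchan bookkeeping stores a pointer to that packet's completion flag, and the completion poller later sets the flag through that pointer. A minimal sketch of the wiring, with simplified stand-ins for the vhost structures (the field names follow the listing; everything else is assumed):

    #include <stdbool.h>
    #include <stdint.h>

    struct dma_vchan_info_sk {
        bool **pkts_cmpl_flag_addr;   /* one slot per outstanding copy */
        uint16_t ring_mask;           /* ring size - 1, size is a power of two */
    };

    /* on submit: remember where this copy's "done" flag lives */
    static inline void
    record_cmpl_flag(struct dma_vchan_info_sk *info, uint16_t copy_idx,
                     bool *flag)
    {
        info->pkts_cmpl_flag_addr[copy_idx & info->ring_mask] = flag;
    }

    /* on completion: flip the flags the submit side registered */
    static inline void
    mark_completed(struct dma_vchan_info_sk *info, uint16_t first,
                   uint16_t nr_done)
    {
        for (uint16_t i = 0; i < nr_done; i++)
            *info->pkts_cmpl_flag_addr[(uint16_t)(first + i) & info->ring_mask] = true;
    }
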
432 struct vhost_async *async = vq->async;
435 async->buffers_packed[async->buffer_idx_packed].id = ids[i];
436 async->buffers_packed[async->buffer_idx_packed].len = lens[i];
437 async->buffers_packed[async->buffer_idx_packed].count = 1;
438 async->buffer_idx_packed++;
439 if (async->buffer_idx_packed >= vq->size)
440 async->buffer_idx_packed -= vq->size;
449 struct vhost_async *async = vq->async;
452 async->buffers_packed[async->buffer_idx_packed].id = ids[i];
453 async->buffers_packed[async->buffer_idx_packed].len = 0;
454 async->buffers_packed[async->buffer_idx_packed].count = 1;
456 async->buffer_idx_packed++;
457 if (async->buffer_idx_packed >= vq->size)
458 async->buffer_idx_packed -= vq->size;
611 struct vhost_async *async = vq->async;
614 async->buffers_packed[async->buffer_idx_packed].id = id[i];
615 async->buffers_packed[async->buffer_idx_packed].len = len[i];
616 async->buffers_packed[async->buffer_idx_packed].count = count[i];
617 async->buffer_idx_packed++;
618 if (async->buffer_idx_packed >= vq->size)
619 async->buffer_idx_packed -= vq->size;
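
The matches at 432-440, 449-458 and 611-619 repeat one pattern: store id/len/count into the next buffers_packed shadow slot, then advance buffer_idx_packed with a conditional subtract instead of a modulo. A hypothetical helper that factors it out (async_shadow_push_packed and the *_sk types are illustrative, not in the source):

    #include <stdint.h>

    struct used_elem_packed_sk { uint16_t id; uint32_t len; uint16_t count; };

    struct vhost_async_sk {
        struct used_elem_packed_sk *buffers_packed;
        uint16_t buffer_idx_packed;
    };

    static inline void
    async_shadow_push_packed(struct vhost_async_sk *async, uint16_t vq_size,
                             uint16_t id, uint32_t len, uint16_t count)
    {
        async->buffers_packed[async->buffer_idx_packed].id = id;
        async->buffers_packed[async->buffer_idx_packed].len = len;
        async->buffers_packed[async->buffer_idx_packed].count = count;
        /* single-step wrap: one conditional subtract replaces a modulo */
        if (++async->buffer_idx_packed >= vq_size)
            async->buffer_idx_packed -= vq_size;
    }
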
1048 async_iter_initialize(struct virtio_net *dev, struct vhost_async *async)
1052 if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
1053 VHOST_DATA_LOG(dev->ifname, ERR, "no more async iovec available");
1057 iter = async->iov_iter + async->iter_idx;
1058 iter->iov = async->iovec + async->iovec_idx;
1065 async_iter_add_iovec(struct virtio_net *dev, struct vhost_async *async,
1071 if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
1075 VHOST_DATA_LOG(dev->ifname, ERR, "no more async iovec available");
1082 iter = async->iov_iter + async->iter_idx;
1083 iovec = async->iovec + async->iovec_idx;
1090 async->iovec_idx++;
1096 async_iter_finalize(struct vhost_async *async)
1098 async->iter_idx++;
1102 async_iter_cancel(struct vhost_async *async)
1106 iter = async->iov_iter + async->iter_idx;
1107 async->iovec_idx -= iter->nr_segs;
1113 async_iter_reset(struct vhost_async *async)
1115 async->iter_idx = 0;
1116 async->iovec_idx = 0;
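
Lines 1048-1116 define a small iterator API over a shared iovec pool: initialize points the next iov_iter at the current tail of the iovec array, add_iovec reserves one segment and bumps iovec_idx, finalize commits by bumping iter_idx, and cancel rolls iovec_idx back by the segments the aborted iterator had claimed; reset clears both counters per burst. A self-contained miniature showing the same invariants (all *_sk names are stand-ins; error handling is reduced to return codes):

    #include <stddef.h>
    #include <stdint.h>

    #define MAX_VEC_SK 8

    struct iovec_sk { void *src, *dst; size_t len; };
    struct iter_sk  { struct iovec_sk *iov; unsigned long nr_segs; };

    struct async_sk {
        struct iter_sk  iov_iter[MAX_VEC_SK];
        struct iovec_sk iovec[MAX_VEC_SK];
        uint16_t iter_idx;   /* committed iterators, one per packet    */
        uint16_t iovec_idx;  /* reserved segments, shared by all iters */
    };

    /* initialize: aim the next iterator at the current iovec tail */
    static int iter_init_sk(struct async_sk *a)
    {
        if (a->iovec_idx >= MAX_VEC_SK)
            return -1;                    /* pool exhausted, as at 1052 */
        a->iov_iter[a->iter_idx].iov = &a->iovec[a->iovec_idx];
        a->iov_iter[a->iter_idx].nr_segs = 0;
        return 0;
    }

    /* add_iovec: reserve one segment for the open iterator */
    static int iter_add_sk(struct async_sk *a, void *src, void *dst, size_t len)
    {
        if (a->iovec_idx >= MAX_VEC_SK)
            return -1;                    /* as at 1071 */
        a->iovec[a->iovec_idx].src = src;
        a->iovec[a->iovec_idx].dst = dst;
        a->iovec[a->iovec_idx].len = len;
        a->iovec_idx++;
        a->iov_iter[a->iter_idx].nr_segs++;
        return 0;
    }

    /* finalize commits; cancel returns the reserved segments to the pool */
    static void iter_finalize_sk(struct async_sk *a) { a->iter_idx++; }

    static void iter_cancel_sk(struct async_sk *a)
    {
        a->iovec_idx -= a->iov_iter[a->iter_idx].nr_segs;
    }

    /* reset: both counters back to zero before the next burst */
    static void iter_reset_sk(struct async_sk *a)
    {
        a->iter_idx = 0;
        a->iovec_idx = 0;
    }

The enqueue path below (1252, 1317, 1322) uses exactly this sequence: initialize, add segments, then finalize on success or cancel on failure.
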
1126 struct vhost_async *async = vq->async;
1150 if (unlikely(async_iter_add_iovec(dev, async, src, dst, (size_t)mapped_len)))
1214 struct vhost_async *async = vq->async;
1252 if (async_iter_initialize(dev, async))
1317 async_iter_finalize(async);
1322 async_iter_cancel(async);
1772 struct vhost_async *async = vq->async;
1774 if (async->pkts_idx >= async->pkts_inflight_n)
1775 return async->pkts_idx - async->pkts_inflight_n;
1777 return vq->size - async->pkts_inflight_n + async->pkts_idx;
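
The helper at 1772-1777 rewinds pkts_idx by pkts_inflight_n, modulo the ring size, to locate the oldest in-flight slot. The two branches are a branch-based modular subtraction; a closed-form equivalent, assuming pkts_inflight_n never exceeds vq->size (which the accounting at 1895, 2172 and 3860 appears to maintain):

    #include <stdint.h>

    static inline uint16_t
    first_inflight_idx(uint16_t pkts_idx, uint16_t inflight, uint16_t vq_size)
    {
        /* both operands are already < or <= vq_size, so one remainder
         * reproduces the branchy form at 1774-1777 */
        return (uint16_t)((pkts_idx + vq_size - inflight) % vq_size);
    }
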
1807 struct vhost_async *async = vq->async;
1808 struct async_inflight_info *pkts_info = async->pkts_info;
1821 async_iter_reset(async);
1844 slot_idx = (async->pkts_idx + pkt_idx) & (vq->size - 1);
1855 n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
1856 async->iov_iter, pkt_idx);
1883 uint16_t to = async->desc_idx_split & (vq->size - 1);
1886 async->descs_split, vq->size, 0, to,
1889 async->desc_idx_split += vq->shadow_used_idx;
1891 async->pkts_idx += pkt_idx;
1892 if (async->pkts_idx >= vq->size)
1893 async->pkts_idx -= vq->size;
1895 async->pkts_inflight_n += pkt_idx;
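
Note the two wraparound idioms: the split-ring paths here (1844, 1883) mask with vq->size - 1, while the packed-ring paths later (2126, 2142) take a remainder. The difference is deliberate: the virtio spec requires split virtqueue sizes to be powers of two, so masking is safe there, but packed ring sizes may be arbitrary. Schematically:

    #include <stdint.h>

    static inline uint16_t
    wrap_split(uint16_t idx, uint16_t size)
    {
        return idx & (uint16_t)(size - 1);  /* valid only for power-of-two sizes */
    }

    static inline uint16_t
    wrap_packed(uint16_t idx, uint16_t size)
    {
        return idx % size;                  /* packed rings may be any size */
    }
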
2000 struct vhost_async *async = vq->async;
2034 async_iter_initialize(dev, async);
2035 async_iter_add_iovec(dev, async,
2039 async->iter_idx++;
2077 struct vhost_async *async = vq->async;
2078 struct async_inflight_info *pkts_info = async->pkts_info;
2096 if (async->buffer_idx_packed >= buffers_err)
2097 async->buffer_idx_packed -= buffers_err;
2099 async->buffer_idx_packed = async->buffer_idx_packed + vq->size - buffers_err;
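
Lines 2096-2099 are the error path after a partial DMA submission: the packed shadow index is rewound by the number of buffers whose copies were never accepted, wrapping under zero explicitly. The same arithmetic as a hypothetical helper (equivalent to (idx + size - n) % size for n <= size):

    #include <stdint.h>

    static inline uint16_t
    rewind_idx(uint16_t idx, uint16_t n, uint16_t size)
    {
        /* back up n slots on a ring of the given size */
        return (idx >= n) ? (uint16_t)(idx - n) : (uint16_t)(idx + size - n);
    }
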
2113 struct vhost_async *async = vq->async;
2114 struct async_inflight_info *pkts_info = async->pkts_info;
2126 slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
2142 slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
2155 n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
2156 async->iov_iter, pkt_idx);
2158 async_iter_reset(async);
2168 async->pkts_idx += pkt_idx;
2169 if (async->pkts_idx >= vq->size)
2170 async->pkts_idx -= vq->size;
2172 async->pkts_inflight_n += pkt_idx;
2181 struct vhost_async *async = vq->async;
2187 from = async->last_desc_idx_split & (vq->size - 1);
2192 rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
2197 rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
2199 rte_memcpy(&vq->used->ring[0], &async->descs_split[from + size],
2203 async->last_desc_idx_split += nr_copy;
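
The write-back at 2187-2203 flushes completed shadow descriptors into the guest-visible used ring, splitting the work into two rte_memcpy calls when the batch crosses the end of the ring. A simplified stand-alone version of the two-segment copy (plain memcpy and reduced types in place of the vhost ones):

    #include <stdint.h>
    #include <string.h>

    struct used_elem_sk { uint32_t id; uint32_t len; };

    static void
    flush_used(struct used_elem_sk *ring, uint16_t ring_size, uint16_t to,
               const struct used_elem_sk *shadow, uint16_t n)
    {
        uint16_t first = n;
        if (to + n > ring_size)
            first = ring_size - to;       /* elements until the ring end */
        memcpy(&ring[to], shadow, first * sizeof(*ring));
        if (first < n)                    /* wrapped: remainder goes to slot 0 */
            memcpy(&ring[0], shadow + first, (n - first) * sizeof(*ring));
    }
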
2214 struct vhost_async *async = vq->async;
2215 uint16_t from = async->last_buffer_idx_packed;
2223 vq->desc_packed[used_idx].id = async->buffers_packed[from].id;
2224 vq->desc_packed[used_idx].len = async->buffers_packed[from].len;
2226 used_idx += async->buffers_packed[from].count;
2238 from = async->last_buffer_idx_packed;
2243 if (async->buffers_packed[from].len)
2263 vq_inc_last_used_packed(vq, async->buffers_packed[from].count);
2271 async->last_buffer_idx_packed = from;
2279 struct vhost_async *async = vq->async;
2280 struct async_inflight_info *pkts_info = async->pkts_info;
2297 while (async->pkts_cmpl_flag[from] && count--) {
2298 async->pkts_cmpl_flag[from] = false;
2317 async->pkts_inflight_n -= nr_cpl_pkts;
2332 async->last_buffer_idx_packed += n_buffers;
2333 if (async->last_buffer_idx_packed >= vq->size)
2334 async->last_buffer_idx_packed -= vq->size;
2336 async->last_desc_idx_split += n_descs;
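
The completion path at 2279-2336 scans pkts_cmpl_flag from the oldest in-flight slot, consumes each set flag, then retires the matching shadow entries (buffers on packed, descriptors on split) and shrinks pkts_inflight_n. A sketch of the counting loop at 2297-2298, with the wrap generalized (the real code wraps per ring type):

    #include <stdbool.h>
    #include <stdint.h>

    static uint16_t
    poll_completed(bool *cmpl_flag, uint16_t from, uint16_t vq_size,
                   uint16_t max_pkts)
    {
        uint16_t n = 0;
        while (max_pkts && cmpl_flag[from]) {
            cmpl_flag[from] = false;      /* consume the flag */
            from = (from + 1) % vq_size;  /* generic wrap for this sketch */
            n++;
            max_pkts--;
        }
        return n;                         /* caller retires n shadow entries */
    }
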
2380 if (unlikely(!vq->async)) {
2382 "%s: async not registered for virtqueue %d.",
2427 if (unlikely(!vq->async)) {
2429 "%s: async not registered for virtqueue %d.",
2487 if (unlikely(!vq->async)) {
2488 VHOST_DATA_LOG(dev->ifname, ERR, "%s: async not registered for queue id %u.",
2534 if (unlikely(!vq->enabled || !vq->async))
2905 struct vhost_async *async = vq->async;
2947 pkts_info = async->pkts_info;
2948 if (async_iter_initialize(dev, async))
3018 async_iter_finalize(async);
3028 async_iter_cancel(async);
3704 struct async_inflight_info *pkts_info = vq->async->pkts_info;
3711 while (vq->async->pkts_cmpl_flag[from] && count--) {
3712 vq->async->pkts_cmpl_flag[from] = false;
3739 vq->async->pkts_inflight_n -= nr_cpl_pkts;
3758 struct vhost_async *async = vq->async;
3759 struct async_inflight_info *pkts_info = async->pkts_info;
3774 async_iter_reset(async);
3827 slot_idx = (async->pkts_idx + pkt_idx) & (vq->size - 1);
3833 "%s: Failed to offload copies to async channel.",
3845 to = async->desc_idx_split & (vq->size - 1);
3846 async->descs_split[to].id = head_idx;
3847 async->descs_split[to].len = 0;
3848 async->desc_idx_split++;
3857 n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
3858 async->iov_iter, pkt_idx);
3860 async->pkts_inflight_n += n_xfer;
3873 * recover async channel copy related structures and free pktmbufs
3876 async->desc_idx_split -= pkt_err;
3883 async->pkts_idx += pkt_idx;
3884 if (async->pkts_idx >= vq->size)
3885 async->pkts_idx -= vq->size;
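
Lines 3873-3885 are the split-ring dequeue recovery: shadow descriptors belonging to the pkt_err failed copies are dropped simply by shrinking desc_idx_split, so they are never flushed, and pkts_idx then advances by the accepted count and wraps. As a hypothetical helper (assuming pkt_idx has already been reduced to the accepted count, as the surrounding code does):

    #include <stdint.h>

    static inline void
    recover_split(uint16_t *desc_idx, uint16_t *pkts_idx,
                  uint16_t pkt_idx, uint16_t pkt_err, uint16_t vq_size)
    {
        *desc_idx -= pkt_err;             /* un-reserve failed shadow slots */
        *pkts_idx += pkt_idx;             /* pkt_idx excludes the failures */
        if (*pkts_idx >= vq_size)
            *pkts_idx -= vq_size;
    }
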
3926 struct vhost_async *async = vq->async;
3927 uint16_t idx = async->buffer_idx_packed;
3929 async->buffers_packed[idx].id = buf_id;
3930 async->buffers_packed[idx].len = 0;
3931 async->buffers_packed[idx].count = count;
3933 async->buffer_idx_packed++;
3934 if (async->buffer_idx_packed >= vq->size)
3935 async->buffer_idx_packed -= vq->size;
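
Unlike the enqueue-side shadow stores at 432 and 611, the dequeue helper at 3926-3935 records len = 0: the used element's length reports bytes written into the guest buffer, and a dequeued descriptor was only read by the device. With the hypothetical async_shadow_push_packed() from the earlier sketch, this match collapses to a one-liner:

    /* dequeue: the device only read the buffer, so the used length is 0 */
    async_shadow_push_packed(async, vq->size, buf_id, /*len=*/0, count);
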
3954 struct vhost_async *async = vq->async;
3955 struct async_inflight_info *pkts_info = async->pkts_info;
3986 /* update async shadow packed ring */
4004 struct vhost_async *async = vq->async;
4005 struct async_inflight_info *pkts_info = async->pkts_info;
4029 async_iter_initialize(dev, async);
4030 async_iter_add_iovec(dev, async,
4034 async->iter_idx++;
4066 struct vhost_async *async = vq->async;
4067 struct async_inflight_info *pkts_info = async->pkts_info;
4072 async_iter_reset(async);
4084 slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
4089 slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
4115 n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
4116 async->iov_iter, pkt_idx);
4118 async->pkts_inflight_n += n_xfer;
4130 if (async->buffer_idx_packed >= pkt_err)
4131 async->buffer_idx_packed -= pkt_err;
4133 async->buffer_idx_packed += vq->size - pkt_err;
4155 async->pkts_idx += pkt_idx;
4156 if (async->pkts_idx >= vq->size)
4157 async->pkts_idx -= vq->size;
4239 if (unlikely(!vq->async)) {
4240 VHOST_DATA_LOG(dev->ifname, ERR, "%s: async not registered for queue id %d.",
4302 *nr_inflight = vq->async->pkts_inflight_n;
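
The guard at 4239 and the store at 4302 belong to the async dequeue entry point; nr_inflight tells the caller how many copies are still sitting in the DMA engine. A usage fragment, assuming the rte_vhost_async_try_dequeue_burst() signature of the public async API (check rte_vhost_async.h for the release in use; vid, queue_id, mbuf_pool, pkts, dma_id and MAX_PKT_BURST are caller-side placeholders):

    int nr_inflight = 0;
    uint16_t n = rte_vhost_async_try_dequeue_burst(vid, queue_id, mbuf_pool,
                    pkts, MAX_PKT_BURST, &nr_inflight, dma_id, /*vchan_id=*/0);
    /* n packets are ready now; nr_inflight copies are still in the DMA
     * engine and surface on a later call once their completion flags
     * (pkts_cmpl_flag) have been set. */
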