
Searched defs:vq (Results 1 – 25 of 31) sorted by relevance

/dpdk/drivers/net/virtio/
virtqueue.c
23 virtqueue_detach_unused(struct virtqueue *vq) in virtqueue_detach_unused()
64 virtqueue_rxvq_flush_packed(struct virtqueue *vq) in virtqueue_rxvq_flush_packed()
91 virtqueue_rxvq_flush_split(struct virtqueue *vq) in virtqueue_rxvq_flush_split()
140 virtqueue_rxvq_flush(struct virtqueue *vq) in virtqueue_rxvq_flush()
151 virtqueue_txq_indirect_header_init_packed(struct virtqueue *vq, uint32_t idx) in virtqueue_txq_indirect_header_init_packed()
167 virtqueue_txq_indirect_header_init_split(struct virtqueue *vq, uint32_t idx) in virtqueue_txq_indirect_header_init_split()
184 virtqueue_txq_indirect_headers_init(struct virtqueue *vq) in virtqueue_txq_indirect_headers_init()
199 virtqueue_rxvq_reset_packed(struct virtqueue *vq) in virtqueue_rxvq_reset_packed()
233 virtqueue_txvq_reset_packed(struct virtqueue *vq) in virtqueue_txvq_reset_packed()
269 virtio_init_vring(struct virtqueue *vq) in virtio_init_vring()
[all …]
virtqueue.h
118 #define VIRTIO_MBUF_ADDR_MASK(vq) ((vq)->mbuf_addr_mask) argument
120 #define VIRTIO_MBUF_ADDR_MASK(vq) UINT64_MAX argument
134 #define VIRTIO_MBUF_ADDR(mb, vq) \ argument
142 #define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \ argument
272 desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq) in desc_is_used()
284 vring_desc_init_packed(struct virtqueue *vq, int n) in vring_desc_init_packed()
320 virtqueue_disable_intr_packed(struct virtqueue *vq) in virtqueue_disable_intr_packed()
333 virtqueue_disable_intr_split(struct virtqueue *vq) in virtqueue_disable_intr_split()
342 virtqueue_disable_intr(struct virtqueue *vq) in virtqueue_disable_intr()
354 virtqueue_enable_intr_packed(struct virtqueue *vq) in virtqueue_enable_intr_packed()
[all …]
virtio_pci.c
193 legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec) in legacy_set_queue_irq()
215 legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) in legacy_setup_queue()
238 legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq) in legacy_del_queue()
248 legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq) in legacy_notify_queue()
404 modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec) in modern_set_queue_irq()
423 modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) in modern_setup_queue()
461 modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq) in modern_del_queue()
478 modern_notify_queue(struct virtio_hw *hw, struct virtqueue *vq) in modern_notify_queue()
virtio_rxtx_packed.c
30 struct virtqueue *vq = virtnet_txq_to_vq(txvq); in virtio_xmit_pkts_packed_vec() local
84 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq); in virtio_recv_pkts_packed_vec() local
virtio_rxtx_packed.h
107 struct virtqueue *vq = virtnet_txq_to_vq(txvq); in virtqueue_enqueue_single_packed_vec() local
214 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq); in virtqueue_dequeue_single_packed_vec() local
266 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq); in virtio_recv_refill_packed_vec() local
virtio_rxtx_packed_avx.h
23 struct virtqueue *vq = virtnet_txq_to_vq(txvq); in virtqueue_enqueue_batch_packed_vec() local
145 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq); in virtqueue_dequeue_batch_packed_vec() local
virtio_rxtx_packed_neon.h
23 struct virtqueue *vq = virtnet_txq_to_vq(txvq); in virtqueue_enqueue_batch_packed_vec() local
166 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq); in virtqueue_dequeue_batch_packed_vec() local
virtio_rxtx_simple.h
26 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq); in virtio_rxq_rearm_vec() local
virtio_ethdev.c
225 virtio_control_queue_notify(struct virtqueue *vq, __rte_unused void *cookie) in virtio_control_queue_notify()
236 struct virtqueue *vq; in virtio_init_queue() local
296 struct virtqueue *vq; in virtio_free_queues() local
517 struct virtqueue *vq; in virtio_check_scatter_on_all_rx_queues() local
579 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq); in virtio_dev_rx_queue_intr_enable() local
590 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq); in virtio_dev_rx_queue_intr_disable() local
1005 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq); in virtio_get_monitor_addr() local
2335 struct virtqueue *vq; in virtio_dev_start() local
2437 struct virtqueue *vq; in virtio_dev_free_mbufs() local
2480 struct virtqueue *vq; in virtio_tx_completed_cleanup() local
virtio_rxtx_simple_neon.c
44 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq); in virtio_recv_pkts_vec() local
virtio_rxtx_simple_sse.c
44 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq); in virtio_recv_pkts_vec() local
/dpdk/lib/vhost/
vhost.c
70 __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq, in __vhost_iova_to_vva() argument
170 __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq, in __vhost_log_write_iova() argument
190 __vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq) in __vhost_log_cache_sync() argument
228 vhost_log_cache_page(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t page) vhost_log_cache_page() argument
269 __vhost_log_cache_write(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t addr,uint64_t len) __vhost_log_cache_write() argument
288 __vhost_log_cache_write_iova(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t iova,uint64_t len) __vhost_log_cache_write_iova() argument
308 vhost_alloc_copy_ind_table(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t desc_addr,uint64_t desc_len) vhost_alloc_copy_ind_table() argument
341 cleanup_vq(struct vhost_virtqueue * vq,int destroy) cleanup_vq() argument
350 cleanup_vq_inflight(struct virtio_net * dev,struct vhost_virtqueue * vq) cleanup_vq_inflight() argument
392 vhost_free_async_mem(struct vhost_virtqueue * vq) vhost_free_async_mem() argument
411 free_vq(struct virtio_net * dev,struct vhost_virtqueue * vq) free_vq() argument
441 log_translate(struct virtio_net * dev,struct vhost_virtqueue * vq) log_translate() argument
461 translate_log_addr(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t log_addr) translate_log_addr() argument
490 vring_translate_split(struct virtio_net * dev,struct vhost_virtqueue * vq) vring_translate_split() argument
529 vring_translate_packed(struct virtio_net * dev,struct vhost_virtqueue * vq) vring_translate_packed() argument
562 vring_translate(struct virtio_net * dev,struct vhost_virtqueue * vq) vring_translate() argument
585 vring_invalidate(struct virtio_net * dev __rte_unused,struct vhost_virtqueue * vq) vring_invalidate() argument
599 init_vring_queue(struct virtio_net * dev __rte_unused,struct vhost_virtqueue * vq,uint32_t vring_idx) init_vring_queue() argument
622 reset_vring_queue(struct virtio_net * dev,struct vhost_virtqueue * vq) reset_vring_queue() argument
634 struct vhost_virtqueue *vq; alloc_vring_queue() local
679 struct vhost_virtqueue *vq = dev->virtqueue[i]; reset_device() local
989 struct vhost_virtqueue *vq; rte_vhost_get_vhost_vring() local
1025 struct vhost_virtqueue *vq; rte_vhost_get_vhost_ring_inflight() local
1059 struct vhost_virtqueue *vq; rte_vhost_set_inflight_desc_split() local
1098 struct vhost_virtqueue *vq; rte_vhost_set_inflight_desc_packed() local
1164 struct vhost_virtqueue *vq; rte_vhost_clr_inflight_desc_split() local
1206 struct vhost_virtqueue *vq; rte_vhost_clr_inflight_desc_packed() local
1251 struct vhost_virtqueue *vq; rte_vhost_set_last_inflight_io_split() local
1287 struct vhost_virtqueue *vq; rte_vhost_set_last_inflight_io_packed() local
1335 struct vhost_virtqueue *vq; rte_vhost_vring_call() local
1371 struct vhost_virtqueue *vq; rte_vhost_vring_call_nonblock() local
1408 struct vhost_virtqueue *vq; rte_vhost_avail_entries() local
1439 vhost_enable_notify_split(struct virtio_net * dev,struct vhost_virtqueue * vq,int enable) vhost_enable_notify_split() argument
1458 vhost_enable_notify_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,int enable) vhost_enable_notify_packed() argument
1485 vhost_enable_guest_notification(struct virtio_net * dev,struct vhost_virtqueue * vq,int enable) vhost_enable_guest_notification() argument
1504 struct vhost_virtqueue *vq; rte_vhost_enable_guest_notification() local
1537 struct vhost_virtqueue *vq; rte_vhost_notify_guest() local
1585 struct vhost_virtqueue *vq; rte_vhost_log_used_vring() local
1604 struct vhost_virtqueue *vq; rte_vhost_rx_queue_count() local
1667 struct vhost_virtqueue *vq; rte_vhost_get_vring_base() local
1697 struct vhost_virtqueue *vq; rte_vhost_set_vring_base() local
1730 struct vhost_virtqueue *vq; rte_vhost_get_vring_base_from_inflight() local
1772 async_channel_register(struct virtio_net * dev,struct vhost_virtqueue * vq) async_channel_register() argument
1847 struct vhost_virtqueue *vq; rte_vhost_async_channel_register() local
1880 struct vhost_virtqueue *vq; rte_vhost_async_channel_register_thread_unsafe() local
1902 struct vhost_virtqueue *vq; rte_vhost_async_channel_unregister() local
1948 struct vhost_virtqueue *vq; rte_vhost_async_channel_unregister_thread_unsafe() local
2058 struct vhost_virtqueue *vq; rte_vhost_async_get_inflight() local
2096 struct vhost_virtqueue *vq; rte_vhost_async_get_inflight_thread_unsafe() local
2126 struct vhost_virtqueue *vq; rte_vhost_get_monitor_addr() local
2203 struct vhost_virtqueue *vq; rte_vhost_vring_stats_get() local
2247 struct vhost_virtqueue *vq; rte_vhost_vring_stats_reset() local
[all...]
iotlb.h
13 vhost_user_iotlb_rd_lock(struct vhost_virtqueue *vq) in vhost_user_iotlb_rd_lock()
20 vhost_user_iotlb_rd_unlock(struct vhost_virtqueue *vq) in vhost_user_iotlb_rd_unlock()
27 vhost_user_iotlb_wr_lock(struct vhost_virtqueue *vq) in vhost_user_iotlb_wr_lock()
34 vhost_user_iotlb_wr_unlock(struct vhost_virtqueue *vq) in vhost_user_iotlb_wr_unlock()
virtio_net.c
56 vhost_queue_stats_update(const struct virtio_net *dev, struct vhost_virtqueue *vq, in vhost_queue_stats_update() argument
102 vhost_async_dma_transfer_one(struct virtio_net * dev,struct vhost_virtqueue * vq,int16_t dma_id,uint16_t vchan_id,uint16_t flag_idx,struct vhost_iov_iter * pkt) vhost_async_dma_transfer_one() argument
152 ret = vhost_async_dma_transfer_one(dev, vq, dma_i argument
235 do_data_copy_enqueue(struct virtio_net * dev,struct vhost_virtqueue * vq) do_data_copy_enqueue() argument
253 do_data_copy_dequeue(struct vhost_virtqueue * vq) do_data_copy_dequeue() argument
267 do_flush_shadow_used_ring_split(struct virtio_net * dev,struct vhost_virtqueue * vq,uint16_t to,uint16_t from,uint16_t size) do_flush_shadow_used_ring_split() argument
279 flush_shadow_used_ring_split(struct virtio_net * dev,struct vhost_virtqueue * vq) flush_shadow_used_ring_split() argument
309 update_shadow_used_ring_split(struct vhost_virtqueue * vq,uint16_t desc_idx,uint32_t len) update_shadow_used_ring_split() argument
320 vhost_flush_enqueue_shadow_packed(struct virtio_net * dev,struct vhost_virtqueue * vq) vhost_flush_enqueue_shadow_packed() argument
384 vhost_flush_dequeue_shadow_packed(struct virtio_net * dev,struct vhost_virtqueue * vq) vhost_flush_dequeue_shadow_packed() argument
403 vhost_flush_enqueue_batch_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t * lens,uint16_t * ids) vhost_flush_enqueue_batch_packed() argument
438 vhost_async_shadow_enqueue_packed_batch(struct vhost_virtqueue * vq,uint64_t * lens,uint16_t * ids) vhost_async_shadow_enqueue_packed_batch() argument
457 vhost_async_shadow_dequeue_packed_batch(struct vhost_virtqueue * vq,uint16_t * ids) vhost_async_shadow_dequeue_packed_batch() argument
475 vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue * vq,uint16_t id) vhost_shadow_dequeue_batch_packed_inorder() argument
494 vhost_shadow_dequeue_batch_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,uint16_t * ids) vhost_shadow_dequeue_batch_packed() argument
533 vhost_shadow_dequeue_single_packed(struct vhost_virtqueue * vq,uint16_t buf_id,uint16_t count) vhost_shadow_dequeue_single_packed() argument
565 vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue * vq,uint16_t buf_id,uint16_t count) vhost_shadow_dequeue_single_packed_inorder() argument
593 vhost_shadow_enqueue_packed(struct vhost_virtqueue * vq,uint32_t * len,uint16_t * id,uint16_t * count,uint16_t num_buffers) vhost_shadow_enqueue_packed() argument
615 vhost_async_shadow_enqueue_packed(struct vhost_virtqueue * vq,uint32_t * len,uint16_t * id,uint16_t * count,uint16_t num_buffers) vhost_async_shadow_enqueue_packed() argument
637 vhost_shadow_enqueue_single_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,uint32_t * len,uint16_t * id,uint16_t * count,uint16_t num_buffers) vhost_shadow_enqueue_single_packed() argument
731 map_one_desc(struct virtio_net * dev,struct vhost_virtqueue * vq,struct buf_vector * buf_vec,uint16_t * vec_idx,uint64_t desc_iova,uint64_t desc_len,uint8_t perm) map_one_desc() argument
768 fill_vec_buf_split(struct virtio_net * dev,struct vhost_virtqueue * vq,uint32_t avail_idx,uint16_t * vec_idx,struct buf_vector * buf_vec,uint16_t * desc_chain_head,uint32_t * desc_chain_len,uint8_t perm) fill_vec_buf_split() argument
852 reserve_avail_buf_split(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t size,struct buf_vector * buf_vec,uint16_t * num_buffers,uint16_t avail_head,uint16_t * nr_vec) reserve_avail_buf_split() argument
904 fill_vec_buf_packed_indirect(struct virtio_net * dev,struct vhost_virtqueue * vq,struct vring_packed_desc * desc,uint16_t * vec_idx,struct buf_vector * buf_vec,uint32_t * len,uint8_t perm) fill_vec_buf_packed_indirect() argument
962 fill_vec_buf_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,uint16_t avail_idx,uint16_t * desc_count,struct buf_vector * buf_vec,uint16_t * vec_idx,uint16_t * buf_id,uint32_t * len,uint8_t perm) fill_vec_buf_packed() argument
1029 copy_vnet_hdr_to_desc(struct virtio_net * dev,struct vhost_virtqueue * vq,struct buf_vector * buf_vec,struct virtio_net_hdr_mrg_rxbuf * hdr) copy_vnet_hdr_to_desc() argument
1132 async_fill_seg(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf * m,uint32_t mbuf_offset,uint64_t buf_iova,uint32_t cpy_len,bool to_desc) async_fill_seg() argument
1174 sync_fill_seg(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf * m,uint32_t mbuf_offset,uint64_t buf_addr,uint64_t buf_iova,uint32_t cpy_len,bool to_desc) sync_fill_seg() argument
1212 mbuf_to_desc(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf * m,struct buf_vector * buf_vec,uint16_t nr_vec,uint16_t num_buffers,bool is_async) mbuf_to_desc() argument
1341 vhost_enqueue_single_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf * pkt,struct buf_vector * buf_vec,uint16_t * nr_descs) vhost_enqueue_single_packed() argument
1405 virtio_dev_rx_split(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf ** pkts,uint32_t count) virtio_dev_rx_split() argument
1462 virtio_dev_rx_sync_batch_check(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf ** pkts,uint64_t * desc_addrs,uint64_t * lens) virtio_dev_rx_sync_batch_check() argument
1513 virtio_dev_rx_async_batch_check(struct vhost_virtqueue * vq,struct rte_mbuf ** pkts,uint64_t * desc_addrs,uint64_t * lens,int16_t dma_id,uint16_t vchan_id) virtio_dev_rx_async_batch_check() argument
1566 virtio_dev_rx_batch_packed_copy(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf ** pkts,uint64_t * desc_addrs,uint64_t * lens) virtio_dev_rx_batch_packed_copy() argument
1616 virtio_dev_rx_sync_batch_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf ** pkts) virtio_dev_rx_sync_batch_packed() argument
1638 virtio_dev_rx_single_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf * pkt) virtio_dev_rx_single_packed() argument
1663 virtio_dev_rx_packed(struct virtio_net * dev,struct vhost_virtqueue * __rte_restrict vq,struct rte_mbuf ** __rte_restrict pkts,uint32_t count) virtio_dev_rx_packed() argument
1700 virtio_dev_vring_translate(struct virtio_net * dev,struct vhost_virtqueue * vq) virtio_dev_vring_translate() argument
1711 virtio_dev_rx(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf ** pkts,uint32_t count) virtio_dev_rx() argument
1780 async_get_first_inflight_pkt_idx(struct vhost_virtqueue * vq) async_get_first_inflight_pkt_idx() argument
1808 virtio_dev_rx_async_submit_split(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf ** pkts,uint32_t count,int16_t dma_id,uint16_t vchan_id) virtio_dev_rx_async_submit_split() argument
1914 vhost_enqueue_async_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf * pkt,struct buf_vector * buf_vec,uint16_t * nr_descs,uint16_t * nr_buffers) vhost_enqueue_async_packed() argument
1977 virtio_dev_rx_async_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf * pkt,uint16_t * nr_descs,uint16_t * nr_buffers) virtio_dev_rx_async_packed() argument
1999 virtio_dev_rx_async_packed_batch_enqueue(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf ** pkts,uint64_t * desc_addrs,uint64_t * lens) virtio_dev_rx_async_packed_batch_enqueue() argument
2062 virtio_dev_rx_async_packed_batch(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf ** pkts,int16_t dma_id,uint16_t vchan_id) virtio_dev_rx_async_packed_batch() argument
2080 dma_error_handler_packed(struct vhost_virtqueue * vq,uint16_t slot_idx,uint32_t nr_err,uint32_t * pkt_idx) dma_error_handler_packed() argument
2111 virtio_dev_rx_async_submit_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf ** pkts,uint32_t count,int16_t dma_id,uint16_t vchan_id) virtio_dev_rx_async_submit_packed() argument
2186 write_back_completed_descs_split(struct vhost_virtqueue * vq,uint16_t n_descs) write_back_completed_descs_split() argument
2218 write_back_completed_descs_packed(struct vhost_virtqueue * vq,uint16_t n_buffers) write_back_completed_descs_packed() argument
2283 vhost_poll_enqueue_completed(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf ** pkts,uint16_t count,int16_t dma_id,uint16_t vchan_id) vhost_poll_enqueue_completed() argument
2357 struct vhost_virtqueue *vq; rte_vhost_poll_enqueue_completed() local
2412 struct vhost_virtqueue *vq; rte_vhost_clear_queue_thread_unsafe() local
2468 struct vhost_virtqueue *vq; rte_vhost_clear_queue() local
2525 virtio_dev_rx_async_submit(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf ** pkts,uint32_t count,int16_t dma_id,uint16_t vchan_id) virtio_dev_rx_async_submit() argument
2891 desc_to_mbuf(struct virtio_net * dev,struct vhost_virtqueue * vq,struct buf_vector * buf_vec,uint16_t nr_vec,struct rte_mbuf * m,struct rte_mempool * mbuf_pool,bool legacy_ol_flags,uint16_t slot_idx,bool is_async) desc_to_mbuf() argument
3100 virtio_dev_tx_split(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mempool * mbuf_pool,struct rte_mbuf ** pkts,uint16_t count,bool legacy_ol_flags) virtio_dev_tx_split() argument
3196 virtio_dev_tx_split_legacy(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mempool * mbuf_pool,struct rte_mbuf ** pkts,uint16_t count) virtio_dev_tx_split_legacy() argument
3207 virtio_dev_tx_split_compliant(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mempool * mbuf_pool,struct rte_mbuf ** pkts,uint16_t count) virtio_dev_tx_split_compliant() argument
3217 vhost_reserve_avail_batch_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf ** pkts,uint16_t avail_idx,uintptr_t * desc_addrs,uint16_t * ids) vhost_reserve_avail_batch_packed() argument
3289 vhost_async_tx_batch_packed_check(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf ** pkts,uint16_t avail_idx,uintptr_t * desc_addrs,uint64_t * lens,uint16_t * ids,int16_t dma_id,uint16_t vchan_id) vhost_async_tx_batch_packed_check() argument
3363 virtio_dev_tx_batch_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf ** pkts,bool legacy_ol_flags) virtio_dev_tx_batch_packed() argument
3407 vhost_dequeue_single_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mempool * mbuf_pool,struct rte_mbuf * pkts,uint16_t * buf_id,uint16_t * desc_count,bool legacy_ol_flags) vhost_dequeue_single_packed() argument
3459 virtio_dev_tx_single_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mempool * mbuf_pool,struct rte_mbuf * pkts,bool legacy_ol_flags) virtio_dev_tx_single_packed() argument
3488 get_nb_avail_entries_packed(const struct vhost_virtqueue * __rte_restrict vq,uint16_t max_nb_avail_entries) get_nb_avail_entries_packed() argument
3519 virtio_dev_tx_packed(struct virtio_net * dev,struct vhost_virtqueue * __rte_restrict vq,struct rte_mempool * mbuf_pool,struct rte_mbuf ** __rte_restrict pkts,uint32_t count,bool legacy_ol_flags) virtio_dev_tx_packed() argument
3573 virtio_dev_tx_packed_legacy(struct virtio_net * dev,struct vhost_virtqueue * __rte_restrict vq,struct rte_mempool * mbuf_pool,struct rte_mbuf ** __rte_restrict pkts,uint32_t count) virtio_dev_tx_packed_legacy() argument
3584 virtio_dev_tx_packed_compliant(struct virtio_net * dev,struct vhost_virtqueue * __rte_restrict vq,struct rte_mempool * mbuf_pool,struct rte_mbuf ** __rte_restrict pkts,uint32_t count) virtio_dev_tx_packed_compliant() argument
3598 struct vhost_virtqueue *vq; rte_vhost_dequeue_burst() local
3703 async_poll_dequeue_completed(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf ** pkts,uint16_t count,int16_t dma_id,uint16_t vchan_id,bool legacy_ol_flags) async_poll_dequeue_completed() argument
3751 virtio_dev_tx_async_split(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mempool * mbuf_pool,struct rte_mbuf ** pkts,uint16_t count,int16_t dma_id,uint16_t vchan_id,bool legacy_ol_flags) virtio_dev_tx_async_split() argument
3902 virtio_dev_tx_async_split_legacy(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mempool * mbuf_pool,struct rte_mbuf ** pkts,uint16_t count,int16_t dma_id,uint16_t vchan_id) virtio_dev_tx_async_split_legacy() argument
3915 virtio_dev_tx_async_split_compliant(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mempool * mbuf_pool,struct rte_mbuf ** pkts,uint16_t count,int16_t dma_id,uint16_t vchan_id) virtio_dev_tx_async_split_compliant() argument
3926 vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue * vq,uint16_t buf_id,uint16_t count) vhost_async_shadow_dequeue_single_packed() argument
3945 virtio_dev_tx_async_single_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mempool * mbuf_pool,struct rte_mbuf * pkts,uint16_t slot_idx,bool legacy_ol_flags) virtio_dev_tx_async_single_packed() argument
4000 virtio_dev_tx_async_packed_batch(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mbuf ** pkts,uint16_t slot_idx,uint16_t dma_id,uint16_t vchan_id) virtio_dev_tx_async_packed_batch() argument
4058 virtio_dev_tx_async_packed(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mempool * mbuf_pool,struct rte_mbuf ** pkts,uint16_t count,uint16_t dma_id,uint16_t vchan_id,bool legacy_ol_flags) virtio_dev_tx_async_packed() argument
4171 virtio_dev_tx_async_packed_legacy(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mempool * mbuf_pool,struct rte_mbuf ** pkts,uint16_t count,uint16_t dma_id,uint16_t vchan_id) virtio_dev_tx_async_packed_legacy() argument
4183 virtio_dev_tx_async_packed_compliant(struct virtio_net * dev,struct vhost_virtqueue * vq,struct rte_mempool * mbuf_pool,struct rte_mbuf ** pkts,uint16_t count,uint16_t dma_id,uint16_t vchan_id) virtio_dev_tx_async_packed_compliant() argument
4200 struct vhost_virtqueue *vq; rte_vhost_async_try_dequeue_burst() local
[all...]
vhost_user.c
121 #define VHOST_USER_ASSERT_LOCK(dev, vq, id) do { \ argument
305 vhost_user_notify_queue_state(struct virtio_net *dev, struct vhost_virtqueue *vq, in vhost_user_notify_queue_state() argument
453 struct vhost_virtqueue *vq; in vhost_user_set_features() local
486 struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index]; vhost_user_set_vring_num() local
564 struct vhost_virtqueue *vq; numa_realloc() local
777 ring_addr_to_vva(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t ra,uint64_t * size) ring_addr_to_vva() argument
795 log_addr_to_gpa(struct virtio_net * dev,struct vhost_virtqueue * vq) log_addr_to_gpa() argument
843 struct vhost_virtqueue *vq; translate_ring_addresses() local
979 struct vhost_virtqueue *vq; vhost_user_set_vring_addr() local
1025 struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index]; vhost_user_set_vring_base() local
1502 struct vhost_virtqueue *vq = dev->virtqueue[i]; vhost_user_set_mem_table() local
1546 vq_is_ready(struct virtio_net * dev,struct vhost_virtqueue * vq) vq_is_ready() argument
1572 struct vhost_virtqueue *vq; virtio_is_ready() local
1787 struct vhost_virtqueue *vq; vhost_user_set_inflight_fd() local
1895 struct vhost_virtqueue *vq; vhost_user_set_vring_call() local
1958 vhost_check_queue_inflights_split(struct virtio_net * dev,struct vhost_virtqueue * vq) vhost_check_queue_inflights_split() argument
2045 vhost_check_queue_inflights_packed(struct virtio_net * dev,struct vhost_virtqueue * vq) vhost_check_queue_inflights_packed() argument
2146 struct vhost_virtqueue *vq; vhost_user_set_vring_kick() local
2213 struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index]; vhost_user_get_vring_base() local
2291 struct vhost_virtqueue *vq; vhost_user_set_vring_enable() local
2423 struct vhost_virtqueue *vq = dev->virtqueue[i]; vhost_user_set_log_base() local
2553 is_vring_iotlb_split(struct vhost_virtqueue * vq,struct vhost_iotlb_msg * imsg) is_vring_iotlb_split() argument
2586 is_vring_iotlb_packed(struct vhost_virtqueue * vq,struct vhost_iotlb_msg * imsg) is_vring_iotlb_packed() argument
2618 is_vring_iotlb(struct virtio_net * dev,struct vhost_virtqueue * vq,struct vhost_iotlb_msg * imsg) is_vring_iotlb() argument
2723 struct vhost_virtqueue *vq = dev->virtqueue[i]; vhost_user_iotlb_msg() local
2740 struct vhost_virtqueue *vq = dev->virtqueue[i]; vhost_user_iotlb_msg() local
3071 struct vhost_virtqueue *vq = dev->virtqueue[i]; vhost_user_lock_all_queue_pairs() local
3089 struct vhost_virtqueue *vq = dev->virtqueue[i]; vhost_user_unlock_all_queue_pairs() local
3276 struct vhost_virtqueue *vq = dev->virtqueue[i]; vhost_user_msg_handler() local
3489 vhost_user_inject_irq(struct virtio_net * dev __rte_unused,struct vhost_virtqueue * vq) vhost_user_inject_irq() argument
[all...]
vduse.c
47 vduse_inject_irq(struct virtio_net *dev, struct vhost_virtqueue *vq) in vduse_inject_irq() argument
141 struct vhost_virtqueue *vq = dev->virtqueue[index]; in vduse_vring_setup() local
234 struct vhost_virtqueue *vq = dev->virtqueue[index]; vduse_vring_cleanup() local
311 struct vhost_virtqueue *vq = dev->virtqueue[i]; vduse_device_start() local
344 struct vhost_virtqueue *vq; vduse_events_handler() local
[all...]
vhost.h
544 vq_assert_lock__(struct virtio_net * dev,struct vhost_virtqueue * vq,const char * func) vq_assert_lock__() argument
551 vq_assert_lock(dev,vq) global() argument
570 vq_inc_last_used_packed(struct vhost_virtqueue * vq,uint16_t num) vq_inc_last_used_packed() argument
580 vq_inc_last_avail_packed(struct vhost_virtqueue * vq,uint16_t num) vq_inc_last_avail_packed() argument
612 vhost_log_cache_sync(struct virtio_net * dev,struct vhost_virtqueue * vq) vhost_log_cache_sync() argument
619 vhost_log_cache_write(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t addr,uint64_t len) vhost_log_cache_write() argument
627 vhost_log_cache_used_vring(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t offset,uint64_t len) vhost_log_cache_used_vring() argument
639 vhost_log_used_vring(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t offset,uint64_t len) vhost_log_used_vring() argument
650 vhost_log_cache_write_iova(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t iova,uint64_t len) vhost_log_cache_write_iova() argument
664 vhost_log_write_iova(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t iova,uint64_t len) vhost_log_write_iova() argument
887 vhost_iova_to_vva(struct virtio_net * dev,struct vhost_virtqueue * vq,uint64_t iova,uint64_t * len,uint8_t perm) vhost_iova_to_vva() argument
915 vhost_vring_inject_irq(struct virtio_net * dev,struct vhost_virtqueue * vq) vhost_vring_inject_irq() argument
954 vhost_vring_call_split(struct virtio_net * dev,struct vhost_virtqueue * vq) vhost_vring_call_split() argument
983 vhost_vring_call_packed(struct virtio_net * dev,struct vhost_virtqueue * vq) vhost_vring_call_packed() argument
[all...]
/dpdk/examples/vhost_blk/
vhost_blk.c
73 struct vhost_blk_queue *vq = task->vq; in enqueue_task() local
104 struct vhost_blk_queue *vq = task->vq; in enqueue_task_packed() local
179 vring_get_next_desc(struct vhost_blk_queue * vq,struct vring_desc * desc) vring_get_next_desc() argument
188 vring_get_next_desc_packed(struct vhost_blk_queue * vq,uint16_t * req_idx) vring_get_next_desc_packed() argument
199 vring_get_next_inflight_desc(struct vhost_blk_queue * vq,struct rte_vhost_inflight_desc_packed * desc) vring_get_next_inflight_desc() argument
210 setup_iovs_from_descs_split(struct vhost_blk_ctrlr * ctrlr,struct vhost_blk_queue * vq,uint16_t req_idx,struct iovec * iovs,uint32_t * iovs_idx,uint32_t * payload) setup_iovs_from_descs_split() argument
241 setup_iovs_from_descs_packed(struct vhost_blk_ctrlr * ctrlr,struct vhost_blk_queue * vq,uint16_t req_idx,struct iovec * iovs,uint32_t * iovs_idx,uint32_t * payload) setup_iovs_from_descs_packed() argument
272 setup_iovs_from_inflight_desc(struct vhost_blk_ctrlr * ctrlr,struct vhost_blk_queue * vq,uint16_t req_idx,struct iovec * iovs,uint32_t * iovs_idx,uint32_t * payload) setup_iovs_from_inflight_desc() argument
376 submit_inflight_vq(struct vhost_blk_queue * vq) submit_inflight_vq() argument
433 vhost_blk_vq_get_desc_chain_buffer_id(struct vhost_blk_queue * vq,uint16_t * req_head,uint16_t * num) vhost_blk_vq_get_desc_chain_buffer_id() argument
457 vq_get_desc_idx(struct vhost_blk_queue * vq) vq_get_desc_idx() argument
470 vhost_blk_vq_is_avail(struct vhost_blk_queue * vq) vhost_blk_vq_is_avail() argument
488 process_vq(struct vhost_blk_queue * vq) process_vq() argument
560 struct vhost_blk_queue *vq; alloc_task_pool() local
596 struct vhost_blk_queue *vq; new_device() local
697 struct vhost_blk_queue *vq; destroy_device() local
[all...]
/dpdk/drivers/crypto/virtio/
virtqueue.h
105 virtqueue_full(const struct virtqueue *vq) in virtqueue_full()
110 #define VIRTQUEUE_NUSED(vq) \ argument
114 vq_update_avail_idx(struct virtqueue *vq) in vq_update_avail_idx()
121 vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx) in vq_update_avail_ring()
138 virtqueue_kick_prepare(struct virtqueue *vq) in virtqueue_kick_prepare()
144 virtqueue_notify(struct virtqueue *vq) in virtqueue_notify()
157 #define VIRTQUEUE_DUMP(vq) do { \ argument
virtqueue.c
14 virtqueue_disable_intr(struct virtqueue *vq) in virtqueue_disable_intr()
25 virtqueue_detatch_unused(struct virtqueue *vq) in virtqueue_detatch_unused()
virtio_pci.c
27 check_vq_phys_addr_ok(struct virtqueue *vq) in check_vq_phys_addr_ok()
138 modern_set_queue_irq(struct virtio_crypto_hw *hw, struct virtqueue *vq, in modern_set_queue_irq()
154 modern_setup_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq) in modern_setup_queue()
194 modern_del_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq) in modern_del_queue()
210 struct virtqueue *vq) in modern_notify_queue()
virtio_rxtx.c
11 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx) in vq_ring_free_chain()
45 virtqueue_dequeue_burst_rx(struct virtqueue *vq, in virtqueue_dequeue_burst_rx()
367 virtio_crypto_vring_start(struct virtqueue *vq) in virtio_crypto_vring_start()
/dpdk/drivers/dma/odm/
odm.c
18 struct odm_queue *vq = &odm->vq[qno]; in odm_vchan_resc_free() local
68 struct odm_queue *vq = &odm->vq[vchan]; in odm_queue_ring_config() local
86 struct odm_queue *vq; in odm_enable() local
131 struct odm_queue *vq = &odm->vq[vchan]; in odm_vchan_setup() local
/dpdk/drivers/vdpa/mlx5/
mlx5_vdpa_virtq.c
283 struct rte_vhost_vring *vq, in mlx5_vdpa_virtq_sub_objs_prepare()
453 struct rte_vhost_vring vq = { in mlx5_vdpa_virtq_single_resource_prepare() local
493 struct rte_vhost_vring *vq, int index) in mlx5_vdpa_virtq_doorbell_setup()
509 struct rte_vhost_vring vq; in mlx5_vdpa_virtq_setup() local
643 struct rte_vhost_vring vq; in mlx5_vdpa_is_pre_created_vq_mismatch() local
674 struct rte_vhost_vring vq; in mlx5_vdpa_virtqs_prepare() local
804 struct rte_vhost_vring vq; in mlx5_vdpa_virtq_is_modified() local
/dpdk/drivers/vdpa/sfc/
sfc_vdpa_ops.c
181 struct rte_vhost_vring vq; in sfc_vdpa_get_vring_info() local
228 efx_virtio_vq_t *vq; in sfc_vdpa_virtq_start() local
314 efx_virtio_vq_t *vq; in sfc_vdpa_virtq_stop() local
340 efx_virtio_vq_t *vq; in sfc_vdpa_configure() local
