/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _VIRTIO_RXTX_H_
#define _VIRTIO_RXTX_H_

#define RTE_PMD_VIRTIO_RX_MAX_BURST 64

struct virtnet_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
	uint64_t multicast;
	uint64_t broadcast;
	/* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
	uint64_t size_bins[8];
};

struct virtnet_rx {
	/* dummy mbuf, for wraparound when processing RX ring. */
	struct rte_mbuf *fake_mbuf;
	uint64_t mbuf_initializer; /**< value to init mbufs. */
	struct rte_mempool *mpool; /**< mempool for mbuf allocation */

	uint16_t queue_id;   /**< DPDK queue index. */
	uint16_t port_id;    /**< Device port identifier. */

	/* Statistics */
	struct virtnet_stats stats;

	const struct rte_memzone *mz; /**< mem zone to populate RX ring. */
};

struct virtnet_tx {
	/** memzone to populate hdr. */
	const struct rte_memzone *virtio_net_hdr_mz;
	rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */

	uint16_t queue_id;   /**< DPDK queue index. */
	uint16_t port_id;    /**< Device port identifier. */

	/* Statistics */
	struct virtnet_stats stats;

	const struct rte_memzone *mz; /**< mem zone to populate TX ring. */
};

struct virtnet_ctl {
	/** memzone to populate hdr. */
	const struct rte_memzone *virtio_net_hdr_mz;
	rte_iova_t virtio_net_hdr_mem; /**< hdr mem for control queue commands */
	uint16_t port_id; /**< Device port identifier. */
	const struct rte_memzone *mz; /**< mem zone to populate CTL ring. */
	rte_spinlock_t lock; /**< spinlock for control queue. */
};

int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);

void virtio_update_packet_stats(struct virtnet_stats *stats,
				struct rte_mbuf *mbuf);

#endif /* _VIRTIO_RXTX_H_ */
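
/*
 * Illustrative only: virtio_update_packet_stats() is merely declared above;
 * the sketch below is NOT the driver's implementation. It shows one way a
 * per-packet update could fill struct virtnet_stats, assuming the RFC 2819
 * style size_bins[] layout hinted at in the struct comment: undersized [0],
 * 64 [1], 65-127 [2], 128-255 [3], 256-511 [4], 512-1023 [5],
 * 1024-1518 [6], oversized [7]. The helper name and the exact bin
 * boundaries are assumptions; only the rte_mbuf pkt_len field comes from
 * the real API. Multicast/broadcast classification (by destination MAC)
 * is left out of the sketch. Kept under #if 0 so it is never compiled.
 */
#if 0
#include <rte_mbuf.h>

static inline void
example_update_packet_stats(struct virtnet_stats *stats,
			    const struct rte_mbuf *mbuf)
{
	uint32_t len = mbuf->pkt_len;

	stats->packets++;
	stats->bytes += len;

	/* Select the RFC 2819-style size bin for this frame length. */
	if (len < 64)
		stats->size_bins[0]++;	/* undersized */
	else if (len == 64)
		stats->size_bins[1]++;	/* exactly 64 bytes */
	else if (len <= 127)
		stats->size_bins[2]++;
	else if (len <= 255)
		stats->size_bins[3]++;
	else if (len <= 511)
		stats->size_bins[4]++;
	else if (len <= 1023)
		stats->size_bins[5]++;
	else if (len <= 1518)
		stats->size_bins[6]++;
	else
		stats->size_bins[7]++;	/* oversized */
}
#endif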
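
/*
 * Illustrative only: a sketch of how the per-queue counters embedded in
 * struct virtnet_rx / struct virtnet_tx could be folded into the
 * device-level struct rte_eth_stats that an ethdev stats callback reports.
 * The helper name and the caller-supplied arrays of queue pointers are
 * assumptions made for this example; struct rte_eth_stats and
 * RTE_ETHDEV_QUEUE_STAT_CNTRS come from rte_ethdev.h. Kept under #if 0
 * so it is never compiled.
 */
#if 0
#include <rte_ethdev.h>

/* Assumes the caller has zeroed *out before accumulation. */
static void
example_fold_queue_stats(struct rte_eth_stats *out,
			 struct virtnet_rx *const *rx_queues, uint16_t nb_rx,
			 struct virtnet_tx *const *tx_queues, uint16_t nb_tx)
{
	uint16_t i;

	for (i = 0; i < nb_rx; i++) {
		const struct virtnet_stats *st = &rx_queues[i]->stats;

		out->ipackets += st->packets;
		out->ibytes += st->bytes;
		out->ierrors += st->errors;
		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			out->q_ipackets[i] = st->packets;
			out->q_ibytes[i] = st->bytes;
		}
	}

	for (i = 0; i < nb_tx; i++) {
		const struct virtnet_stats *st = &tx_queues[i]->stats;

		out->opackets += st->packets;
		out->obytes += st->bytes;
		out->oerrors += st->errors;
		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			out->q_opackets[i] = st->packets;
			out->q_obytes[i] = st->bytes;
		}
	}
}
#endif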