xref: /dpdk/drivers/net/netvsc/hn_var.h (revision 2d0c29a37a9c080c1cccb1ad7941aba2ccf5437e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2009-2018 Microsoft Corp.
3  * Copyright (c) 2016 Brocade Communications Systems, Inc.
4  * Copyright (c) 2012 NetApp Inc.
5  * Copyright (c) 2012 Citrix Inc.
6  * All rights reserved.
7  */
8 
9 /*
10  * Tunable ethdev params
11  */
12 #define HN_MIN_RX_BUF_SIZE	1024
13 #define HN_MAX_XFER_LEN		2048
14 #define	HN_MAX_MAC_ADDRS	1
15 #define HN_MAX_CHANNELS		64
16 
17 /* Claimed to be 12232B */
18 #define HN_MTU_MAX		(9 * 1024)
19 
20 /* Retry interval */
21 #define HN_CHAN_INTERVAL_US	100
22 
23 /* Host monitor interval */
24 #define HN_CHAN_LATENCY_NS	50000
25 
26 /* Buffers need to be aligned */
27 #ifndef PAGE_SIZE
28 #define PAGE_SIZE 4096
29 #endif
30 
31 #ifndef PAGE_MASK
32 #define PAGE_MASK (PAGE_SIZE - 1)
33 #endif
34 
/* Forward declarations; full definitions appear below or in sibling sources */
struct hn_data;
struct hn_txdesc;

/* Per-queue traffic counters, kept for both Rx and Tx queues */
struct hn_stats {
	uint64_t	packets;	/* total packets seen on the queue */
	uint64_t	bytes;		/* total bytes seen on the queue */
	uint64_t	errors;		/* packets dropped due to errors */
	uint64_t	ring_full;	/* enqueue failures: ring had no room */
	uint64_t	multicast;	/* multicast packet count */
	uint64_t	broadcast;	/* broadcast packet count */
	/* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
	uint64_t	size_bins[8];
};
48 
/* Per-port transmit queue state */
struct hn_tx_queue {
	struct hn_data  *hv;		/* back pointer to device private data */
	struct vmbus_channel *chan;	/* VMBus channel carrying this queue */
	uint16_t	port_id;	/* ethdev port this queue belongs to */
	uint16_t	queue_id;	/* index of this queue on the port */
	uint32_t	free_thresh;	/* free Tx descriptors below this trigger cleanup */

	/* Applied packet transmission aggregation limits. */
	uint32_t	agg_szmax;	/* max aggregated size, bytes */
	uint32_t	agg_pktmax;	/* max packets per aggregation */
	uint32_t	agg_align;	/* alignment for aggregated packets */

	/* Packet transmission aggregation states */
	struct hn_txdesc *agg_txd;	/* descriptor currently being filled */
	uint32_t	agg_pktleft;	/* packets still allowed in current batch */
	uint32_t	agg_szleft;	/* bytes still allowed in current batch */
	struct rndis_packet_msg *agg_prevpkt;	/* last packet appended to the batch */

	struct hn_stats stats;		/* Tx counters for this queue */
};
69 
/* Per-port receive queue state */
struct hn_rx_queue {
	struct hn_data  *hv;		/* back pointer to device private data */
	struct vmbus_channel *chan;	/* VMBus channel carrying this queue */
	struct rte_mempool *mb_pool;	/* mbuf pool used to build Rx packets */
	struct rte_ring *rx_ring;	/* staging ring of received mbufs */

	rte_spinlock_t ring_lock;	/* serializes access to rx_ring */
	uint32_t event_sz;		/* size of event_buf, bytes */
	uint16_t port_id;		/* ethdev port this queue belongs to */
	uint16_t queue_id;		/* index of this queue on the port */
	struct hn_stats stats;		/* Rx counters for this queue */

	void *event_buf;		/* scratch buffer for channel events */
};
84 
85 
/* multi-packet data from host: tracks one slot of the host-shared Rx
 * buffer so the mbuf external-buffer callback can ack the host when
 * the last reference is dropped.
 */
struct hn_rx_bufinfo {
	struct vmbus_channel *chan;	/* channel the data arrived on */
	struct hn_data *hv;		/* owning device */
	uint64_t	xactid;		/* host transaction id to complete */
	struct rte_mbuf_ext_shared_info shinfo;	/* refcount for attached mbufs */
} __rte_cache_aligned;

/* Sentinel meaning "no VF port associated" (valid ids are < UINT16_MAX) */
#define HN_INVALID_PORT	UINT16_MAX
95 
/* Per-device private data for a netvsc (hn) ethdev port */
struct hn_data {
	struct rte_vmbus_device *vmbus;	/* underlying VMBus device */
	struct hn_rx_queue *primary;	/* Rx queue on the primary channel */
	rte_spinlock_t  vf_lock;	/* guards VF attach/detach state */
	uint16_t	port_id;	/* this netvsc device's port id */
	uint16_t	vf_port;	/* paired VF port id, or HN_INVALID_PORT */

	uint8_t		vf_present;	/* nonzero when host reports a VF */
	uint8_t		closed;		/* nonzero once device is closed */
	uint8_t		vlan_strip;	/* nonzero when VLAN stripping enabled */

	uint32_t	link_status;	/* cached link up/down state */
	uint32_t	link_speed;	/* cached link speed */

	struct rte_mem_resource *rxbuf_res;	/* UIO resource for Rx */
	struct hn_rx_bufinfo *rxbuf_info;	/* per-section Rx buffer tracking */
	uint32_t	rxbuf_section_cnt;	/* # of Rx sections */
	volatile uint32_t rxbuf_outstanding;	/* Rx buffers not yet acked to host */
	uint16_t	max_queues;		/* Max available queues */
	uint16_t	num_queues;		/* queues currently configured */
	uint64_t	rss_offloads;		/* supported RSS offload flags */

	struct rte_mem_resource *chim_res;	/* UIO resource for Tx */
	struct rte_mempool *tx_pool;		/* Tx descriptors */
	uint32_t	chim_szmax;		/* Max size per buffer */
	uint32_t	chim_cnt;		/* Max packets per buffer */

	uint32_t	latency;	/* requested host monitor latency, ns */
	uint32_t	nvs_ver;	/* negotiated NVS protocol version */
	uint32_t	ndis_ver;	/* negotiated NDIS version */
	uint32_t	rndis_agg_size;	/* host RNDIS aggregation size limit */
	uint32_t	rndis_agg_pkts;	/* host RNDIS aggregation packet limit */
	uint32_t	rndis_agg_align;	/* host RNDIS aggregation alignment */

	volatile uint32_t  rndis_pending;	/* id of RNDIS request in flight */
	rte_atomic32_t	rndis_req_id;		/* generator for RNDIS request ids */
	uint8_t		rndis_resp[256];	/* buffer for last RNDIS response */

	struct ether_addr mac_addr;	/* MAC address reported by host */

	struct rte_eth_dev_owner owner;	/* ownership marker for the paired VF */
	struct rte_intr_handle vf_intr;	/* interrupt handle for VF hotplug events */

	struct vmbus_channel *channels[HN_MAX_CHANNELS];	/* [0] is primary */
};
141 
142 static inline struct vmbus_channel *
143 hn_primary_chan(const struct hn_data *hv)
144 {
145 	return hv->channels[0];
146 }
147 
/* Drain pending channel events for one queue; returns the number of
 * events processed. tx_limit presumably bounds Tx completions handled
 * per call — confirm against hn_rxtx.c.
 */
uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id,
		       uint32_t tx_limit);

/* Burst transmit/receive entry points (rte_eth_tx_burst/rx_burst shape) */
uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		      uint16_t nb_pkts);
uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		      uint16_t nb_pkts);

/* Tx descriptor pool lifecycle and queue setup/teardown */
int	hn_tx_pool_init(struct rte_eth_dev *dev);
void	hn_tx_pool_uninit(struct rte_eth_dev *dev);
int	hn_dev_link_update(struct rte_eth_dev *dev, int wait);
int	hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
void	hn_dev_tx_queue_release(void *arg);
void	hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
			     struct rte_eth_txq_info *qinfo);
int	hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);

/* Rx queue allocation and setup/teardown */
struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
				      uint16_t queue_id,
				      unsigned int socket_id);
int	hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
			      uint16_t queue_idx, uint16_t nb_desc,
			      unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
void	hn_dev_rx_queue_release(void *arg);
176 
177 /* Check if VF is attached */
178 static inline bool
179 hn_vf_attached(const struct hn_data *hv)
180 {
181 	return hv->vf_port != HN_INVALID_PORT;
182 }
183 
184 /* Get VF device for existing netvsc device */
185 static inline struct rte_eth_dev *
186 hn_get_vf_dev(const struct hn_data *hv)
187 {
188 	uint16_t vf_port = hv->vf_port;
189 
190 	/* make sure vf_port is loaded */
191 	rte_smp_rmb();
192 
193 	if (vf_port == HN_INVALID_PORT)
194 		return NULL;
195 	else
196 		return &rte_eth_devices[vf_port];
197 }
198 
/* VF passthrough helpers: each mirrors an ethdev op onto the paired
 * SR-IOV VF device when one is attached (implemented in hn_vf.c).
 */
void	hn_vf_info_get(struct hn_data *hv,
		       struct rte_eth_dev_info *info);
int	hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);
int	hn_vf_configure(struct rte_eth_dev *dev,
			const struct rte_eth_conf *dev_conf);
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev);
int	hn_vf_start(struct rte_eth_dev *dev);
void	hn_vf_reset(struct rte_eth_dev *dev);
void	hn_vf_stop(struct rte_eth_dev *dev);
void	hn_vf_close(struct rte_eth_dev *dev);

/* Promiscuous / multicast filtering forwarded to the VF */
void	hn_vf_allmulticast_enable(struct rte_eth_dev *dev);
void	hn_vf_allmulticast_disable(struct rte_eth_dev *dev);
void	hn_vf_promiscuous_enable(struct rte_eth_dev *dev);
void	hn_vf_promiscuous_disable(struct rte_eth_dev *dev);
int	hn_vf_mc_addr_list(struct rte_eth_dev *dev,
			   struct ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr);

/* Link state and queue setup forwarded to the VF */
int	hn_vf_link_update(struct rte_eth_dev *dev,
			  int wait_to_complete);
int	hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf);
void	hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id);
int	hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf,
			     struct rte_mempool *mp);
void	hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id);

/* Statistics (basic and extended) forwarded to the VF */
int	hn_vf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
void	hn_vf_stats_reset(struct rte_eth_dev *dev);
int	hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			       struct rte_eth_xstat_name *xstats_names,
			       unsigned int size);
int	hn_vf_xstats_get(struct rte_eth_dev *dev,
			 struct rte_eth_xstat *xstats,
			 unsigned int n);
void	hn_vf_xstats_reset(struct rte_eth_dev *dev);
241