xref: /dpdk/drivers/net/netvsc/hn_var.h (revision 27595cd83053b2d39634a159d6709b3ce3cdf3b0)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2009-2018 Microsoft Corp.
3  * Copyright (c) 2016 Brocade Communications Systems, Inc.
4  * Copyright (c) 2012 NetApp Inc.
5  * Copyright (c) 2012 Citrix Inc.
6  * All rights reserved.
7  */
8 
9 #include <rte_eal_paging.h>
10 #include <ethdev_driver.h>
11 
12 /*
13  * Tunable ethdev params
14  */
15 #define HN_MIN_RX_BUF_SIZE	1024
16 #define HN_MAX_XFER_LEN		RTE_ETHER_MAX_JUMBO_FRAME_LEN
17 #define	HN_MAX_MAC_ADDRS	1
18 #define HN_MAX_CHANNELS		64
19 
/* Host reportedly supports up to 12232B; cap advertised MTU at 9K */
21 #define HN_MTU_MAX		(9 * 1024)
22 
23 /* Retry interval */
24 #define HN_CHAN_INTERVAL_US	100
25 
26 /* Host monitor interval */
27 #define HN_CHAN_LATENCY_NS	50000
28 
29 #define HN_TXCOPY_THRESHOLD	512
30 #define HN_RXCOPY_THRESHOLD	256
31 
32 #define HN_RX_EXTMBUF_ENABLE	0
33 
34 #ifndef PAGE_MASK
35 #define PAGE_MASK (rte_mem_page_size() - 1)
36 #endif
37 
38 struct hn_data;
39 struct hn_txdesc;
40 
/* Per-queue traffic counters, kept separately for Rx and Tx queues */
struct hn_stats {
	uint64_t	packets;
	uint64_t	bytes;
	uint64_t	errors;
	uint64_t	ring_full;	/* local ring had no space — presumably Rx staging ring; confirm in datapath */
	uint64_t	channel_full;	/* VMBus channel had no space — confirm in datapath */
	uint64_t	multicast;
	uint64_t	broadcast;
	/* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
	uint64_t	size_bins[8];
};
52 
/* Per-queue transmit state */
struct hn_tx_queue {
	struct hn_data  *hv;		/* back pointer to device private data */
	struct vmbus_channel *chan;	/* VMBus channel carrying this queue */
	uint16_t	port_id;
	uint16_t	queue_id;
	uint32_t	free_thresh;	/* free descriptor threshold before cleanup */
	struct rte_mempool *txdesc_pool;	/* pool of hn_txdesc descriptors */
	const struct rte_memzone *tx_rndis_mz;	/* memzone backing tx_rndis */
	void		*tx_rndis;	/* RNDIS header staging area */
	rte_iova_t	tx_rndis_iova;	/* IOVA of tx_rndis for the host */

	/* Applied packet transmission aggregation limits. */
	uint32_t	agg_szmax;	/* max aggregated bytes */
	uint32_t	agg_pktmax;	/* max aggregated packets */
	uint32_t	agg_align;	/* alignment between aggregated packets */

	/* Packet transmission aggregation states */
	struct hn_txdesc *agg_txd;	/* descriptor currently being aggregated */
	uint32_t	agg_pktleft;	/* packets still allowed in agg_txd */
	uint32_t	agg_szleft;	/* bytes still allowed in agg_txd */
	struct rndis_packet_msg *agg_prevpkt;	/* last packet placed in agg_txd */

	struct hn_stats stats;
};
77 
/* Per-queue receive state */
struct hn_rx_queue {
	struct hn_data  *hv;		/* back pointer to device private data */
	struct vmbus_channel *chan;	/* VMBus channel carrying this queue */
	struct rte_mempool *mb_pool;	/* mbuf pool for received packets */
	struct rte_ring *rx_ring;	/* staging ring of received mbufs */

	rte_spinlock_t ring_lock;	/* protects rx_ring */
	uint32_t event_sz;		/* size of event_buf */
	uint16_t port_id;
	uint16_t queue_id;
	struct hn_stats stats;

	void *event_buf;		/* buffer for reading channel events */
	struct hn_rx_bufinfo *rxbuf_info;	/* per-section external-mbuf info */
	rte_atomic32_t  rxbuf_outstanding;	/* host Rx buffer sections still in use */
};
94 
95 
/* multi-packet data from host */
struct __rte_cache_aligned hn_rx_bufinfo {
	struct vmbus_channel *chan;	/* channel the data arrived on */
	struct hn_rx_queue *rxq;	/* owning receive queue */
	uint64_t	xactid;		/* transaction id to ack back to host */
	struct rte_mbuf_ext_shared_info shinfo;	/* refcount for zero-copy mbufs */
};
103 
104 #define HN_INVALID_PORT	UINT16_MAX
105 
/* Lifecycle state of the paired VF (accelerated) device */
enum vf_device_state {
	vf_unknown = 0,		/* no VF seen yet */
	vf_removed,		/* VF was hot-removed */
	vf_configured,		/* VF device configured */
	vf_started,		/* VF device started */
	vf_stopped,		/* VF device stopped */
};
113 
/* Tracking context for the VF device paired with this synthetic NIC */
struct hn_vf_ctx {
	uint16_t	vf_port;	/* DPDK port id of the VF, HN_INVALID_PORT if none */

	/* We have taken ownership of this VF port from DPDK */
	bool		vf_attached;

	/* VSC has requested to switch data path to VF */
	bool		vf_vsc_switched;

	/* VSP has reported the VF is present for this NIC */
	bool		vf_vsp_reported;

	enum vf_device_state	vf_state;
};
128 
/* Pending hot-add request for a VF device, queued on hn_data.hotadd_list */
struct hv_hotadd_context {
	LIST_ENTRY(hv_hotadd_context) list;	/* linkage in hotadd_list */
	struct hn_data *hv;			/* owning device */
	struct rte_devargs da;			/* devargs used to probe the VF */
	int eal_hot_plug_retry;			/* remaining hotplug retry attempts */
};
135 
/* Per-device private data for a netvsc (synthetic) NIC */
struct hn_data {
	struct rte_vmbus_device *vmbus;	/* underlying VMBus device */
	struct hn_rx_queue *primary;	/* queue 0, created at device init */
	rte_rwlock_t    vf_lock;	/* protects vf_ctx / VF attach state */
	uint16_t	port_id;	/* this (synthetic) port's id */

	struct hn_vf_ctx	vf_ctx;	/* paired VF tracking state */

	uint8_t		closed;		/* device has been closed */
	uint8_t		vlan_strip;	/* VLAN stripping enabled */

	uint32_t	link_status;
	uint32_t	link_speed;

	struct rte_mem_resource rxbuf_res;	/* UIO resource for Rx */
	uint32_t	rxbuf_section_cnt;	/* # of Rx sections */
	uint32_t	rx_copybreak;		/* copy packets smaller than this */
	uint32_t	rx_extmbuf_enable;	/* use external (zero-copy) mbufs */
	uint16_t	max_queues;		/* Max available queues */
	uint16_t	num_queues;		/* queues currently configured */
	uint64_t	rss_offloads;		/* supported RSS offload flags */

	rte_spinlock_t	chim_lock;		/* protects chimney bitmap */
	struct rte_mem_resource chim_res;	/* UIO resource for Tx */
	struct rte_bitmap *chim_bmap;		/* Send buffer map */
	void		*chim_bmem;		/* memory backing chim_bmap */
	uint32_t	tx_copybreak;		/* copy packets smaller than this */
	uint32_t	chim_szmax;		/* Max size per buffer */
	uint32_t	chim_cnt;		/* Max packets per buffer */

	uint32_t	latency;		/* host interrupt latency (ns) */
	uint32_t	nvs_ver;		/* negotiated NVS protocol version */
	uint32_t	ndis_ver;		/* negotiated NDIS version */
	uint32_t	rndis_agg_size;		/* host RNDIS aggregation limits */
	uint32_t	rndis_agg_pkts;
	uint32_t	rndis_agg_align;

	volatile uint32_t  rndis_pending;	/* id of RNDIS request in flight */
	rte_atomic32_t	rndis_req_id;		/* next RNDIS request id */
	uint8_t		rndis_resp[256];	/* buffer for RNDIS response */

	uint32_t	rss_hash;		/* configured RSS hash functions */
	uint8_t		rss_key[40];		/* RSS hash key */
	uint16_t	rss_ind[128];		/* RSS indirection table */

	struct rte_eth_dev_owner owner;		/* ownership record for the VF port */

	struct vmbus_channel *channels[HN_MAX_CHANNELS];

	rte_spinlock_t	hotadd_lock;		/* protects hotadd_list */
	LIST_HEAD(hotadd_list, hv_hotadd_context) hotadd_list;	/* pending VF hot-adds */
	char		*vf_devargs;		/* devargs string for VF probe */
};
189 
190 static inline struct vmbus_channel *
hn_primary_chan(const struct hn_data * hv)191 hn_primary_chan(const struct hn_data *hv)
192 {
193 	return hv->channels[0];
194 }
195 
/* Datapath: drain channel events, transmit and receive bursts */
uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id,
		       uint32_t tx_limit);

uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		      uint16_t nb_pkts);
uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		      uint16_t nb_pkts);

/* Chimney (send buffer) lifecycle and Tx queue operations */
int	hn_chim_init(struct rte_eth_dev *dev);
void	hn_chim_uninit(struct rte_eth_dev *dev);
int	hn_dev_link_update(struct rte_eth_dev *dev, int wait);
int	hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
void	hn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void	hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
			     struct rte_eth_txq_info *qinfo);
int	hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);
int	hn_dev_tx_descriptor_status(void *arg, uint16_t offset);

/* Rx queue operations */
struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
				      uint16_t queue_id,
				      unsigned int socket_id);
int	hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
			      uint16_t queue_idx, uint16_t nb_desc,
			      unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
void	hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
			     struct rte_eth_rxq_info *qinfo);
void	hn_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
uint32_t hn_dev_rx_queue_count(void *rx_queue);
int	hn_dev_rx_queue_status(void *rxq, uint16_t offset);
void	hn_dev_free_queues(struct rte_eth_dev *dev);
230 
231 /*
232  * Get VF device for existing netvsc device
233  * Assumes vf_lock is held.
234  */
235 static inline struct rte_eth_dev *
hn_get_vf_dev(const struct hn_data * hv)236 hn_get_vf_dev(const struct hn_data *hv)
237 {
238 	if (hv->vf_ctx.vf_attached)
239 		return &rte_eth_devices[hv->vf_ctx.vf_port];
240 	else
241 		return NULL;
242 }
243 
/* VF (accelerated datapath) helpers: apply ethdev operations to the
 * paired VF port as well as the synthetic device.
 */
int	hn_vf_info_get(struct hn_data *hv,
		       struct rte_eth_dev_info *info);
int	hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);
int	hn_vf_configure_locked(struct rte_eth_dev *dev,
			       const struct rte_eth_conf *dev_conf);
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev,
				       size_t *no_of_elements);
int	hn_vf_start(struct rte_eth_dev *dev);
int	hn_vf_close(struct rte_eth_dev *dev);
int	hn_vf_stop(struct rte_eth_dev *dev);

/* Promiscuous / multicast filtering on the VF */
int	hn_vf_allmulticast_enable(struct rte_eth_dev *dev);
int	hn_vf_allmulticast_disable(struct rte_eth_dev *dev);
int	hn_vf_promiscuous_enable(struct rte_eth_dev *dev);
int	hn_vf_promiscuous_disable(struct rte_eth_dev *dev);
int	hn_vf_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr);

/* VF queue setup/teardown mirrored from the synthetic device */
int	hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf);
void	hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id);
int	hn_vf_tx_queue_status(struct hn_data *hv, uint16_t queue_id, uint16_t offset);

int	hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf,
			     struct rte_mempool *mp);
void	hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id);

/* VF statistics and RSS configuration */
int	hn_vf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
int	hn_vf_stats_reset(struct rte_eth_dev *dev);
int	hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			       struct rte_eth_xstat_name *xstats_names,
			       unsigned int size);
int	hn_vf_xstats_get(struct rte_eth_dev *dev,
			 struct rte_eth_xstat *xstats,
			 unsigned int offset, unsigned int n);
int	hn_vf_xstats_reset(struct rte_eth_dev *dev);
int	hn_vf_rss_hash_update(struct rte_eth_dev *dev,
			      struct rte_eth_rss_conf *rss_conf);
int	hn_vf_reta_hash_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
int hn_vf_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
int	hn_eth_rmv_event_callback(uint16_t port_id,
				  enum rte_eth_event_type event __rte_unused,
				  void *cb_arg, void *out __rte_unused);
295