1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2015 Intel Corporation
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <fcntl.h>
13 #include <inttypes.h>
14 #include <rte_byteorder.h>
15 #include <rte_common.h>
16 #include <rte_cycles.h>
17 
18 #include <rte_interrupts.h>
19 #include <rte_log.h>
20 #include <rte_debug.h>
21 #include <rte_pci.h>
22 #include <rte_bus_pci.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_memory.h>
25 #include <rte_memzone.h>
26 #include <rte_eal.h>
27 #include <rte_alarm.h>
28 #include <rte_ether.h>
29 #include <rte_ethdev_driver.h>
30 #include <rte_ethdev_pci.h>
31 #include <rte_string_fns.h>
32 #include <rte_malloc.h>
33 #include <rte_dev.h>
34 
35 #include "base/vmxnet3_defs.h"
36 
37 #include "vmxnet3_ring.h"
38 #include "vmxnet3_logs.h"
39 #include "vmxnet3_ethdev.h"
40 
41 #define PROCESS_SYS_EVENTS 0
42 
43 #define	VMXNET3_TX_MAX_SEG	UINT8_MAX
44 
45 #define VMXNET3_TX_OFFLOAD_CAP		\
46 	(DEV_TX_OFFLOAD_VLAN_INSERT |	\
47 	 DEV_TX_OFFLOAD_TCP_CKSUM |	\
48 	 DEV_TX_OFFLOAD_UDP_CKSUM |	\
49 	 DEV_TX_OFFLOAD_TCP_TSO |	\
50 	 DEV_TX_OFFLOAD_MULTI_SEGS)
51 
52 #define VMXNET3_RX_OFFLOAD_CAP		\
53 	(DEV_RX_OFFLOAD_VLAN_STRIP |	\
54 	 DEV_RX_OFFLOAD_VLAN_FILTER |   \
55 	 DEV_RX_OFFLOAD_SCATTER |	\
56 	 DEV_RX_OFFLOAD_UDP_CKSUM |	\
57 	 DEV_RX_OFFLOAD_TCP_CKSUM |	\
58 	 DEV_RX_OFFLOAD_TCP_LRO |	\
59 	 DEV_RX_OFFLOAD_JUMBO_FRAME |   \
60 	 DEV_RX_OFFLOAD_RSS_HASH)
61 
62 static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
63 static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
64 static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
65 static int vmxnet3_dev_start(struct rte_eth_dev *dev);
66 static int vmxnet3_dev_stop(struct rte_eth_dev *dev);
67 static int vmxnet3_dev_close(struct rte_eth_dev *dev);
68 static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
69 static int vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
70 static int vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
71 static int vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
72 static int vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
73 static int __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
74 				     int wait_to_complete);
75 static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
76 				   int wait_to_complete);
77 static void vmxnet3_hw_stats_save(struct vmxnet3_hw *hw);
78 static int vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
79 				  struct rte_eth_stats *stats);
80 static int vmxnet3_dev_stats_reset(struct rte_eth_dev *dev);
81 static int vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
82 					struct rte_eth_xstat_name *xstats,
83 					unsigned int n);
84 static int vmxnet3_dev_xstats_get(struct rte_eth_dev *dev,
85 				  struct rte_eth_xstat *xstats, unsigned int n);
86 static int vmxnet3_dev_info_get(struct rte_eth_dev *dev,
87 				struct rte_eth_dev_info *dev_info);
88 static const uint32_t *
89 vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
90 static int vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
91 static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
92 				       uint16_t vid, int on);
93 static int vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
94 static int vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
95 				 struct rte_ether_addr *mac_addr);
96 static void vmxnet3_interrupt_handler(void *param);
97 
98 /*
99  * The set of PCI devices this driver supports
100  */
101 #define VMWARE_PCI_VENDOR_ID 0x15AD
102 #define VMWARE_DEV_ID_VMXNET3 0x07B0
103 static const struct rte_pci_id pci_id_vmxnet3_map[] = {
104 	{ RTE_PCI_DEVICE(VMWARE_PCI_VENDOR_ID, VMWARE_DEV_ID_VMXNET3) },
105 	{ .vendor_id = 0, /* sentinel */ },
106 };
107 
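/*
 * ethdev callback table. The Rx/Tx queue setup and release callbacks
 * referenced here are implemented elsewhere in the driver (Rx/Tx code).
 */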
108 static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
109 	.dev_configure        = vmxnet3_dev_configure,
110 	.dev_start            = vmxnet3_dev_start,
111 	.dev_stop             = vmxnet3_dev_stop,
112 	.dev_close            = vmxnet3_dev_close,
113 	.promiscuous_enable   = vmxnet3_dev_promiscuous_enable,
114 	.promiscuous_disable  = vmxnet3_dev_promiscuous_disable,
115 	.allmulticast_enable  = vmxnet3_dev_allmulticast_enable,
116 	.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
117 	.link_update          = vmxnet3_dev_link_update,
118 	.stats_get            = vmxnet3_dev_stats_get,
119 	.xstats_get_names     = vmxnet3_dev_xstats_get_names,
120 	.xstats_get           = vmxnet3_dev_xstats_get,
121 	.stats_reset          = vmxnet3_dev_stats_reset,
122 	.mac_addr_set         = vmxnet3_mac_addr_set,
123 	.dev_infos_get        = vmxnet3_dev_info_get,
124 	.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
125 	.mtu_set              = vmxnet3_dev_mtu_set,
126 	.vlan_filter_set      = vmxnet3_dev_vlan_filter_set,
127 	.vlan_offload_set     = vmxnet3_dev_vlan_offload_set,
128 	.rx_queue_setup       = vmxnet3_dev_rx_queue_setup,
129 	.rx_queue_release     = vmxnet3_dev_rx_queue_release,
130 	.tx_queue_setup       = vmxnet3_dev_tx_queue_setup,
131 	.tx_queue_release     = vmxnet3_dev_tx_queue_release,
132 };
133 
134 struct vmxnet3_xstats_name_off {
135 	char name[RTE_ETH_XSTATS_NAME_SIZE];
136 	unsigned int offset;
137 };
138 
139 /* tx_qX_ is prepended to the name string here */
140 static const struct vmxnet3_xstats_name_off vmxnet3_txq_stat_strings[] = {
141 	{"drop_total",         offsetof(struct vmxnet3_txq_stats, drop_total)},
142 	{"drop_too_many_segs", offsetof(struct vmxnet3_txq_stats, drop_too_many_segs)},
143 	{"drop_tso",           offsetof(struct vmxnet3_txq_stats, drop_tso)},
144 	{"tx_ring_full",       offsetof(struct vmxnet3_txq_stats, tx_ring_full)},
145 };
146 
147 /* rx_qX_ is prepended to the name string here */
148 static const struct vmxnet3_xstats_name_off vmxnet3_rxq_stat_strings[] = {
149 	{"drop_total",           offsetof(struct vmxnet3_rxq_stats, drop_total)},
150 	{"drop_err",             offsetof(struct vmxnet3_rxq_stats, drop_err)},
151 	{"drop_fcs",             offsetof(struct vmxnet3_rxq_stats, drop_fcs)},
152 	{"rx_buf_alloc_failure", offsetof(struct vmxnet3_rxq_stats, rx_buf_alloc_failure)},
153 };
154 
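/*
 * Reserve an IOVA-contiguous memzone named "eth_p<port>_<post_string>",
 * or return the existing one when reuse is requested. When reuse is not
 * requested, any existing memzone with that name is freed first.
 */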
155 static const struct rte_memzone *
156 gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
157 		 const char *post_string, int socket_id,
158 		 uint16_t align, bool reuse)
159 {
160 	char z_name[RTE_MEMZONE_NAMESIZE];
161 	const struct rte_memzone *mz;
162 
163 	snprintf(z_name, sizeof(z_name), "eth_p%d_%s",
164 			dev->data->port_id, post_string);
165 
166 	mz = rte_memzone_lookup(z_name);
167 	if (!reuse) {
168 		if (mz)
169 			rte_memzone_free(mz);
170 		return rte_memzone_reserve_aligned(z_name, size, socket_id,
171 				RTE_MEMZONE_IOVA_CONTIG, align);
172 	}
173 
174 	if (mz)
175 		return mz;
176 
177 	return rte_memzone_reserve_aligned(z_name, size, socket_id,
178 			RTE_MEMZONE_IOVA_CONTIG, align);
179 }
180 
181 /*
182  * This function is based on vmxnet3_disable_intr() in the Linux vmxnet3 driver.
183  */
184 static void
185 vmxnet3_disable_intr(struct vmxnet3_hw *hw)
186 {
187 	int i;
188 
189 	PMD_INIT_FUNC_TRACE();
190 
191 	hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
192 	for (i = 0; i < hw->num_intrs; i++)
193 		VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
194 }
195 
196 static void
197 vmxnet3_enable_intr(struct vmxnet3_hw *hw)
198 {
199 	int i;
200 
201 	PMD_INIT_FUNC_TRACE();
202 
203 	hw->shared->devRead.intrConf.intrCtrl &= ~VMXNET3_IC_DISABLE_ALL;
204 	for (i = 0; i < hw->num_intrs; i++)
205 		VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 0);
206 }
207 
208 /*
209  * Gets tx data ring descriptor size.
210  */
211 static uint16_t
212 eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
213 {
214 	uint16 txdata_desc_size;
215 
216 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
217 			       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
218 	txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
219 
220 	return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
221 		txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
222 		txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK) ?
223 		sizeof(struct Vmxnet3_TxDataDesc) : txdata_desc_size;
224 }
225 
226 /*
227  * Initialize the adapter during device probe. It returns 0 on success.
228  */
229 static int
230 eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
231 {
232 	struct rte_pci_device *pci_dev;
233 	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
234 	uint32_t mac_hi, mac_lo, ver;
235 	struct rte_eth_link link;
236 
237 	PMD_INIT_FUNC_TRACE();
238 
239 	eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
240 	eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
241 	eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
242 	eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
243 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
244 
245 	/*
246 	 * for secondary processes, we don't initialize any further as primary
247 	 * has already done this work.
248 	 */
249 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
250 		return 0;
251 
252 	rte_eth_copy_pci_info(eth_dev, pci_dev);
253 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
254 
255 	/* Vendor and Device ID need to be set before init of shared code */
256 	hw->device_id = pci_dev->id.device_id;
257 	hw->vendor_id = pci_dev->id.vendor_id;
258 	hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
259 	hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;
260 
261 	hw->num_rx_queues = 1;
262 	hw->num_tx_queues = 1;
263 	hw->bufs_per_pkt = 1;
264 
265 	/* Check h/w version compatibility with driver. */
266 	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
267 	PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);
268 
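	/* Negotiate the highest device revision (4 down to 1) supported by both sides */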
269 	if (ver & (1 << VMXNET3_REV_4)) {
270 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
271 				       1 << VMXNET3_REV_4);
272 		hw->version = VMXNET3_REV_4 + 1;
273 	} else if (ver & (1 << VMXNET3_REV_3)) {
274 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
275 				       1 << VMXNET3_REV_3);
276 		hw->version = VMXNET3_REV_3 + 1;
277 	} else if (ver & (1 << VMXNET3_REV_2)) {
278 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
279 				       1 << VMXNET3_REV_2);
280 		hw->version = VMXNET3_REV_2 + 1;
281 	} else if (ver & (1 << VMXNET3_REV_1)) {
282 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
283 				       1 << VMXNET3_REV_1);
284 		hw->version = VMXNET3_REV_1 + 1;
285 	} else {
286 		PMD_INIT_LOG(ERR, "Incompatible hardware version: %d", ver);
287 		return -EIO;
288 	}
289 
290 	PMD_INIT_LOG(DEBUG, "Using device version %d\n", hw->version);
291 
292 	/* Check UPT version compatibility with driver. */
293 	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
294 	PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
295 	if (ver & 0x1)
296 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
297 	else {
298 		PMD_INIT_LOG(ERR, "Incompatible UPT version.");
299 		return -EIO;
300 	}
301 
302 	/* Getting MAC Address */
303 	mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
304 	mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
305 	memcpy(hw->perm_addr, &mac_lo, 4);
306 	memcpy(hw->perm_addr + 4, &mac_hi, 2);
307 
308 	/* Allocate memory for storing MAC addresses */
309 	eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", RTE_ETHER_ADDR_LEN *
310 					       VMXNET3_MAX_MAC_ADDRS, 0);
311 	if (eth_dev->data->mac_addrs == NULL) {
312 		PMD_INIT_LOG(ERR,
313 			     "Failed to allocate %d bytes needed to store MAC addresses",
314 			     RTE_ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
315 		return -ENOMEM;
316 	}
317 	/* Copy the permanent MAC address */
318 	rte_ether_addr_copy((struct rte_ether_addr *)hw->perm_addr,
319 			&eth_dev->data->mac_addrs[0]);
320 
321 	PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
322 		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
323 		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
324 
325 	/* Put device in Quiesce Mode */
326 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
327 
328 	/* allow untagged pkts */
329 	VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);
330 
331 	hw->txdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
332 		eth_vmxnet3_txdata_get(hw) : sizeof(struct Vmxnet3_TxDataDesc);
333 
334 	hw->rxdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
335 		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
336 	RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
337 		   hw->rxdata_desc_size);
338 
339 	/* clear shadow stats */
340 	memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
341 	memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));
342 
343 	/* clear snapshot stats */
344 	memset(hw->snapshot_tx_stats, 0, sizeof(hw->snapshot_tx_stats));
345 	memset(hw->snapshot_rx_stats, 0, sizeof(hw->snapshot_rx_stats));
346 
347 	/* set the initial link status */
348 	memset(&link, 0, sizeof(link));
349 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
350 	link.link_speed = ETH_SPEED_NUM_10G;
351 	link.link_autoneg = ETH_LINK_FIXED;
352 	rte_eth_linkstatus_set(eth_dev, &link);
353 
354 	return 0;
355 }
356 
357 static int
358 eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
359 {
360 	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
361 
362 	PMD_INIT_FUNC_TRACE();
363 
364 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
365 		return 0;
366 
367 	if (hw->adapter_stopped == 0) {
368 		PMD_INIT_LOG(DEBUG, "Device has not been closed.");
369 		return -EBUSY;
370 	}
371 
372 	return 0;
373 }
374 
375 static int eth_vmxnet3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
376 	struct rte_pci_device *pci_dev)
377 {
378 	return rte_eth_dev_pci_generic_probe(pci_dev,
379 		sizeof(struct vmxnet3_hw), eth_vmxnet3_dev_init);
380 }
381 
382 static int eth_vmxnet3_pci_remove(struct rte_pci_device *pci_dev)
383 {
384 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_vmxnet3_dev_uninit);
385 }
386 
387 static struct rte_pci_driver rte_vmxnet3_pmd = {
388 	.id_table = pci_id_vmxnet3_map,
389 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
390 	.probe = eth_vmxnet3_pci_probe,
391 	.remove = eth_vmxnet3_pci_remove,
392 };
393 
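/*
 * Validate the requested queue configuration and allocate the shared,
 * queue-descriptor and (optionally) RSS configuration memzones that are
 * later handed to the device.
 */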
394 static int
395 vmxnet3_dev_configure(struct rte_eth_dev *dev)
396 {
397 	const struct rte_memzone *mz;
398 	struct vmxnet3_hw *hw = dev->data->dev_private;
399 	size_t size;
400 
401 	PMD_INIT_FUNC_TRACE();
402 
403 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
404 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
405 
406 	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
407 	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
408 		PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
409 		return -EINVAL;
410 	}
411 
412 	if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
413 		PMD_INIT_LOG(ERR, "ERROR: Number of rx queues not power of 2");
414 		return -EINVAL;
415 	}
416 
417 	size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
418 		dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc);
419 
420 	if (size > UINT16_MAX)
421 		return -EINVAL;
422 
423 	hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
424 	hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;
425 
426 	/*
427 	 * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
428 	 * on current socket
429 	 */
430 	mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
431 			      "shared", rte_socket_id(), 8, 1);
432 
433 	if (mz == NULL) {
434 		PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
435 		return -ENOMEM;
436 	}
437 	memset(mz->addr, 0, mz->len);
438 
439 	hw->shared = mz->addr;
440 	hw->sharedPA = mz->iova;
441 
442 	/*
443 	 * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
444 	 * on current socket.
445 	 *
446 	 * We cannot reuse this memzone from previous allocation as its size
447 	 * depends on the number of tx and rx queues, which could be different
448 	 * from one config to another.
449 	 */
450 	mz = gpa_zone_reserve(dev, size, "queuedesc", rte_socket_id(),
451 			      VMXNET3_QUEUE_DESC_ALIGN, 0);
452 	if (mz == NULL) {
453 		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
454 		return -ENOMEM;
455 	}
456 	memset(mz->addr, 0, mz->len);
457 
458 	hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
459 	hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);
460 
461 	hw->queueDescPA = mz->iova;
462 	hw->queue_desc_len = (uint16_t)size;
463 
464 	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
465 		/* Allocate memory structure for UPT1_RSSConf and configure */
466 		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
467 				      "rss_conf", rte_socket_id(),
468 				      RTE_CACHE_LINE_SIZE, 1);
469 		if (mz == NULL) {
470 			PMD_INIT_LOG(ERR,
471 				     "ERROR: Creating rss_conf structure zone");
472 			return -ENOMEM;
473 		}
474 		memset(mz->addr, 0, mz->len);
475 
476 		hw->rss_conf = mz->addr;
477 		hw->rss_confPA = mz->iova;
478 	}
479 
480 	return 0;
481 }
482 
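/* Program the given MAC address into the device MACL/MACH registers */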
483 static void
484 vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
485 {
486 	uint32_t val;
487 
488 	PMD_INIT_LOG(DEBUG,
489 		     "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
490 		     addr[0], addr[1], addr[2],
491 		     addr[3], addr[4], addr[5]);
492 
493 	memcpy(&val, addr, 4);
494 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);
495 
496 	memcpy(&val, addr + 4, 2);
497 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
498 }
499 
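/*
 * Describe the Rx mempool memory to the device so that it can be
 * registered with VMXNET3_CMD_REGISTER_MEMREGS (device version 3 and
 * later). Mempools shared by several queues are collapsed into a single
 * region carrying a bitmask of the queues that use them.
 */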
500 static int
501 vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
502 {
503 	struct vmxnet3_hw *hw = dev->data->dev_private;
504 	Vmxnet3_DriverShared *shared = hw->shared;
505 	Vmxnet3_CmdInfo *cmdInfo;
506 	struct rte_mempool *mp[VMXNET3_MAX_RX_QUEUES];
507 	uint8_t index[VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES];
508 	uint32_t num, i, j, size;
509 
510 	if (hw->memRegsPA == 0) {
511 		const struct rte_memzone *mz;
512 
513 		size = sizeof(Vmxnet3_MemRegs) +
514 			(VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES) *
515 			sizeof(Vmxnet3_MemoryRegion);
516 
517 		mz = gpa_zone_reserve(dev, size, "memRegs", rte_socket_id(), 8,
518 				      1);
519 		if (mz == NULL) {
520 			PMD_INIT_LOG(ERR, "ERROR: Creating memRegs zone");
521 			return -ENOMEM;
522 		}
523 		memset(mz->addr, 0, mz->len);
524 		hw->memRegs = mz->addr;
525 		hw->memRegsPA = mz->iova;
526 	}
527 
528 	num = hw->num_rx_queues;
529 
530 	for (i = 0; i < num; i++) {
531 		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
532 
533 		mp[i] = rxq->mp;
534 		index[i] = 1 << i;
535 	}
536 
537 	/*
538 	 * The same mempool could be used by multiple queues. In such a case,
539 	 * remove duplicate mempool entries. Only one entry is kept with
540 	 * a bitmask indicating the queues that are using this mempool.
541 	 */
542 	for (i = 1; i < num; i++) {
543 		for (j = 0; j < i; j++) {
544 			if (mp[i] == mp[j]) {
545 				mp[i] = NULL;
546 				index[j] |= 1 << i;
547 				break;
548 			}
549 		}
550 	}
551 
552 	j = 0;
553 	for (i = 0; i < num; i++) {
554 		if (mp[i] == NULL)
555 			continue;
556 
557 		Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];
558 
559 		mr->startPA =
560 			(uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->iova;
561 		mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
562 			STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
563 		mr->txQueueBits = index[i];
564 		mr->rxQueueBits = index[i];
565 
566 		PMD_INIT_LOG(INFO,
567 			     "index: %u startPA: %" PRIu64 " length: %u, "
568 			     "rxBits: %x",
569 			     j, mr->startPA, mr->length, mr->rxQueueBits);
570 		j++;
571 	}
572 	hw->memRegs->numRegs = j;
573 	PMD_INIT_LOG(INFO, "numRegs: %u", j);
574 
575 	size = sizeof(Vmxnet3_MemRegs) +
576 		(j - 1) * sizeof(Vmxnet3_MemoryRegion);
577 
578 	cmdInfo = &shared->cu.cmdInfo;
579 	cmdInfo->varConf.confVer = 1;
580 	cmdInfo->varConf.confLen = size;
581 	cmdInfo->varConf.confPA = hw->memRegsPA;
582 
583 	return 0;
584 }
585 
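/*
 * Fill the Vmxnet3_DriverShared area read by the device on activation:
 * driver/guest info, MTU, queue descriptor location, interrupt settings,
 * per-queue ring configuration, Rx offload features, RSS and VLAN setup.
 */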
586 static int
587 vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
588 {
589 	struct rte_eth_conf port_conf = dev->data->dev_conf;
590 	struct vmxnet3_hw *hw = dev->data->dev_private;
591 	uint32_t mtu = dev->data->mtu;
592 	Vmxnet3_DriverShared *shared = hw->shared;
593 	Vmxnet3_DSDevRead *devRead = &shared->devRead;
594 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
595 	uint32_t i;
596 	int ret;
597 
598 	hw->mtu = mtu;
599 
600 	shared->magic = VMXNET3_REV1_MAGIC;
601 	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
602 
603 	/* Setting up Guest OS information */
604 	devRead->misc.driverInfo.gos.gosBits   = sizeof(void *) == 4 ?
605 		VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64;
606 	devRead->misc.driverInfo.gos.gosType   = VMXNET3_GOS_TYPE_LINUX;
607 	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
608 	devRead->misc.driverInfo.uptVerSpt     = 1;
609 
610 	devRead->misc.mtu = rte_le_to_cpu_32(mtu);
611 	devRead->misc.queueDescPA  = hw->queueDescPA;
612 	devRead->misc.queueDescLen = hw->queue_desc_len;
613 	devRead->misc.numTxQueues  = hw->num_tx_queues;
614 	devRead->misc.numRxQueues  = hw->num_rx_queues;
615 
616 	/*
617 	 * Set the number of interrupts to 1.
618 	 * The PMD disables all interrupts by default, but at least one
619 	 * interrupt must be configured to activate the device and to
620 	 * handle link change events.
621 	 */
622 	hw->num_intrs = devRead->intrConf.numIntrs = 1;
623 	devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
624 
625 	for (i = 0; i < hw->num_tx_queues; i++) {
626 		Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
627 		vmxnet3_tx_queue_t *txq  = dev->data->tx_queues[i];
628 
629 		txq->shared = &hw->tqd_start[i];
630 
631 		tqd->ctrl.txNumDeferred  = 0;
632 		tqd->ctrl.txThreshold    = 1;
633 		tqd->conf.txRingBasePA   = txq->cmd_ring.basePA;
634 		tqd->conf.compRingBasePA = txq->comp_ring.basePA;
635 		tqd->conf.dataRingBasePA = txq->data_ring.basePA;
636 
637 		tqd->conf.txRingSize   = txq->cmd_ring.size;
638 		tqd->conf.compRingSize = txq->comp_ring.size;
639 		tqd->conf.dataRingSize = txq->data_ring.size;
640 		tqd->conf.txDataRingDescSize = txq->txdata_desc_size;
641 		tqd->conf.intrIdx      = txq->comp_ring.intr_idx;
642 		tqd->status.stopped    = TRUE;
643 		tqd->status.error      = 0;
644 		memset(&tqd->stats, 0, sizeof(tqd->stats));
645 	}
646 
647 	for (i = 0; i < hw->num_rx_queues; i++) {
648 		Vmxnet3_RxQueueDesc *rqd  = &hw->rqd_start[i];
649 		vmxnet3_rx_queue_t *rxq   = dev->data->rx_queues[i];
650 
651 		rxq->shared = &hw->rqd_start[i];
652 
653 		rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
654 		rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
655 		rqd->conf.compRingBasePA  = rxq->comp_ring.basePA;
656 
657 		rqd->conf.rxRingSize[0]   = rxq->cmd_ring[0].size;
658 		rqd->conf.rxRingSize[1]   = rxq->cmd_ring[1].size;
659 		rqd->conf.compRingSize    = rxq->comp_ring.size;
660 		rqd->conf.intrIdx         = rxq->comp_ring.intr_idx;
661 		if (VMXNET3_VERSION_GE_3(hw)) {
662 			rqd->conf.rxDataRingBasePA = rxq->data_ring.basePA;
663 			rqd->conf.rxDataRingDescSize = rxq->data_desc_size;
664 		}
665 		rqd->status.stopped       = TRUE;
666 		rqd->status.error         = 0;
667 		memset(&rqd->stats, 0, sizeof(rqd->stats));
668 	}
669 
670 	/* Start with rxMode cleared: none of the VMXNET3_RXM_xxx bits set */
671 	devRead->rxFilterConf.rxMode = 0;
672 
673 	/* Setting up feature flags */
674 	if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
675 		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
676 
677 	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
678 		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
679 		devRead->misc.maxNumRxSG = 0;
680 	}
681 
682 	if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
683 		ret = vmxnet3_rss_configure(dev);
684 		if (ret != VMXNET3_SUCCESS)
685 			return ret;
686 
687 		devRead->misc.uptFeatures |= VMXNET3_F_RSS;
688 		devRead->rssConfDesc.confVer = 1;
689 		devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
690 		devRead->rssConfDesc.confPA  = hw->rss_confPA;
691 	}
692 
693 	ret = vmxnet3_dev_vlan_offload_set(dev,
694 			ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
695 	if (ret)
696 		return ret;
697 
698 	vmxnet3_write_mac(hw, dev->data->mac_addrs->addr_bytes);
699 
700 	return VMXNET3_SUCCESS;
701 }
702 
703 /*
704  * Configure device link speed and set up the link.
705  * Must be called after eth_vmxnet3_dev_init; otherwise it might fail.
706  * It returns 0 on success.
707  */
708 static int
709 vmxnet3_dev_start(struct rte_eth_dev *dev)
710 {
711 	int ret;
712 	struct vmxnet3_hw *hw = dev->data->dev_private;
713 
714 	PMD_INIT_FUNC_TRACE();
715 
716 	/* Save stats before it is reset by CMD_ACTIVATE */
717 	vmxnet3_hw_stats_save(hw);
718 
719 	ret = vmxnet3_setup_driver_shared(dev);
720 	if (ret != VMXNET3_SUCCESS)
721 		return ret;
722 
723 	/* check if lsc interrupt feature is enabled */
724 	if (dev->data->dev_conf.intr_conf.lsc) {
725 		struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
726 
727 		/* Setup interrupt callback  */
728 		rte_intr_callback_register(&pci_dev->intr_handle,
729 					   vmxnet3_interrupt_handler, dev);
730 
731 		if (rte_intr_enable(&pci_dev->intr_handle) < 0) {
732 			PMD_INIT_LOG(ERR, "interrupt enable failed");
733 			return -EIO;
734 		}
735 	}
736 
737 	/* Exchange shared data with device */
738 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
739 			       VMXNET3_GET_ADDR_LO(hw->sharedPA));
740 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
741 			       VMXNET3_GET_ADDR_HI(hw->sharedPA));
742 
743 	/* Activate device by register write */
744 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
745 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
746 
747 	if (ret != 0) {
748 		PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
749 		return -EINVAL;
750 	}
751 
752 	/* Setup memory region for rx buffers */
753 	ret = vmxnet3_dev_setup_memreg(dev);
754 	if (ret == 0) {
755 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
756 				       VMXNET3_CMD_REGISTER_MEMREGS);
757 		ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
758 		if (ret != 0)
759 			PMD_INIT_LOG(DEBUG,
760 				     "Failed in setup memory region cmd\n");
761 		ret = 0;
762 	} else {
763 		PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
764 	}
765 
766 	if (VMXNET3_VERSION_GE_4(hw) &&
767 	    dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
768 		/* Configure the additional (version 4) RSS settings */
769 		ret = vmxnet3_v4_rss_configure(dev);
770 		if (ret != VMXNET3_SUCCESS) {
771 			PMD_INIT_LOG(ERR, "Failed to configure v4 RSS");
772 			return ret;
773 		}
774 	}
775 
776 	/* Disable interrupts */
777 	vmxnet3_disable_intr(hw);
778 
779 	/*
780 	 * Load the Rx queues with blank mbufs, update the next2fill index
781 	 * for the device, and update the device's RxMode.
782 	 */
783 	ret = vmxnet3_dev_rxtx_init(dev);
784 	if (ret != VMXNET3_SUCCESS) {
785 		PMD_INIT_LOG(ERR, "Device queue init: UNSUCCESSFUL");
786 		return ret;
787 	}
788 
789 	hw->adapter_stopped = FALSE;
790 
791 	/* Setting proper Rx Mode and issue Rx Mode Update command */
792 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
793 
794 	if (dev->data->dev_conf.intr_conf.lsc) {
795 		vmxnet3_enable_intr(hw);
796 
797 		/*
798 		 * Update link state from device since this won't be
799 		 * done upon starting with lsc in use. This is done
800 		 * only after enabling interrupts to avoid any race
801 		 * where the link state could change without an
802 		 * interrupt being fired.
803 		 */
804 		__vmxnet3_dev_link_update(dev, 0);
805 	}
806 
807 	return VMXNET3_SUCCESS;
808 }
809 
810 /*
811  * Stop device: disable rx and tx functions to allow for reconfiguring.
812  */
813 static int
814 vmxnet3_dev_stop(struct rte_eth_dev *dev)
815 {
816 	struct rte_eth_link link;
817 	struct vmxnet3_hw *hw = dev->data->dev_private;
818 
819 	PMD_INIT_FUNC_TRACE();
820 
821 	if (hw->adapter_stopped == 1) {
822 		PMD_INIT_LOG(DEBUG, "Device already stopped.");
823 		return 0;
824 	}
825 
826 	/* disable interrupts */
827 	vmxnet3_disable_intr(hw);
828 
829 	if (dev->data->dev_conf.intr_conf.lsc) {
830 		struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
831 
832 		rte_intr_disable(&pci_dev->intr_handle);
833 
834 		rte_intr_callback_unregister(&pci_dev->intr_handle,
835 					     vmxnet3_interrupt_handler, dev);
836 	}
837 
838 	/* quiesce the device first */
839 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
840 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
841 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);
842 
843 	/* reset the device */
844 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
845 	PMD_INIT_LOG(DEBUG, "Device reset.");
846 
847 	vmxnet3_dev_clear_queues(dev);
848 
849 	/* Clear recorded link status */
850 	memset(&link, 0, sizeof(link));
851 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
852 	link.link_speed = ETH_SPEED_NUM_10G;
853 	link.link_autoneg = ETH_LINK_FIXED;
854 	rte_eth_linkstatus_set(dev, &link);
855 
856 	hw->adapter_stopped = 1;
857 	dev->data->dev_started = 0;
858 
859 	return 0;
860 }
861 
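/* Release all Rx and Tx queues; used when closing the device */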
862 static void
863 vmxnet3_free_queues(struct rte_eth_dev *dev)
864 {
865 	int i;
866 
867 	PMD_INIT_FUNC_TRACE();
868 
869 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
870 		void *rxq = dev->data->rx_queues[i];
871 
872 		vmxnet3_dev_rx_queue_release(rxq);
873 	}
874 	dev->data->nb_rx_queues = 0;
875 
876 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
877 		void *txq = dev->data->tx_queues[i];
878 
879 		vmxnet3_dev_tx_queue_release(txq);
880 	}
881 	dev->data->nb_tx_queues = 0;
882 }
883 
884 /*
885  * Reset and stop device.
886  */
887 static int
888 vmxnet3_dev_close(struct rte_eth_dev *dev)
889 {
890 	int ret;
891 	PMD_INIT_FUNC_TRACE();
892 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
893 		return 0;
894 
895 	ret = vmxnet3_dev_stop(dev);
896 	vmxnet3_free_queues(dev);
897 
898 	return ret;
899 }
900 
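/*
 * Cumulative per-queue counters: the device clears its statistics on
 * activation, so the values saved in vmxnet3_hw_stats_save() before the
 * last restart are added to the current hardware counters.
 */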
901 static void
902 vmxnet3_hw_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
903 			struct UPT1_TxStats *res)
904 {
905 #define VMXNET3_UPDATE_TX_STAT(h, i, f, r)		\
906 		((r)->f = (h)->tqd_start[(i)].stats.f +	\
907 			(h)->saved_tx_stats[(i)].f)
908 
909 	VMXNET3_UPDATE_TX_STAT(hw, q, ucastPktsTxOK, res);
910 	VMXNET3_UPDATE_TX_STAT(hw, q, mcastPktsTxOK, res);
911 	VMXNET3_UPDATE_TX_STAT(hw, q, bcastPktsTxOK, res);
912 	VMXNET3_UPDATE_TX_STAT(hw, q, ucastBytesTxOK, res);
913 	VMXNET3_UPDATE_TX_STAT(hw, q, mcastBytesTxOK, res);
914 	VMXNET3_UPDATE_TX_STAT(hw, q, bcastBytesTxOK, res);
915 	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxError, res);
916 	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxDiscard, res);
917 
918 #undef VMXNET3_UPDATE_TX_STAT
919 }
920 
921 static void
922 vmxnet3_hw_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
923 			struct UPT1_RxStats *res)
924 {
925 #define VMXNET3_UPDATE_RX_STAT(h, i, f, r)		\
926 		((r)->f = (h)->rqd_start[(i)].stats.f +	\
927 			(h)->saved_rx_stats[(i)].f)
928 
929 	VMXNET3_UPDATE_RX_STAT(hw, q, ucastPktsRxOK, res);
930 	VMXNET3_UPDATE_RX_STAT(hw, q, mcastPktsRxOK, res);
931 	VMXNET3_UPDATE_RX_STAT(hw, q, bcastPktsRxOK, res);
932 	VMXNET3_UPDATE_RX_STAT(hw, q, ucastBytesRxOK, res);
933 	VMXNET3_UPDATE_RX_STAT(hw, q, mcastBytesRxOK, res);
934 	VMXNET3_UPDATE_RX_STAT(hw, q, bcastBytesRxOK, res);
935 	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxError, res);
936 	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxOutOfBuf, res);
937 
938 #undef VMXNET3_UPDATE_RX_STAT
939 }
940 
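/*
 * Per-queue counters relative to the last stats_reset: the snapshot taken
 * at reset time is subtracted from the cumulative counters.
 */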
941 static void
942 vmxnet3_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
943 					struct UPT1_TxStats *res)
944 {
945 		vmxnet3_hw_tx_stats_get(hw, q, res);
946 
947 #define VMXNET3_REDUCE_SNAPSHOT_TX_STAT(h, i, f, r)	\
948 		((r)->f -= (h)->snapshot_tx_stats[(i)].f)
949 
950 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastPktsTxOK, res);
951 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastPktsTxOK, res);
952 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastPktsTxOK, res);
953 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastBytesTxOK, res);
954 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastBytesTxOK, res);
955 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastBytesTxOK, res);
956 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxError, res);
957 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxDiscard, res);
958 
959 #undef VMXNET3_REDUCE_SNAPSHOT_TX_STAT
960 }
961 
962 static void
963 vmxnet3_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
964 					struct UPT1_RxStats *res)
965 {
966 		vmxnet3_hw_rx_stats_get(hw, q, res);
967 
968 #define VMXNET3_REDUCE_SNAPSHOT_RX_STAT(h, i, f, r)	\
969 		((r)->f -= (h)->snapshot_rx_stats[(i)].f)
970 
971 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastPktsRxOK, res);
972 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastPktsRxOK, res);
973 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastPktsRxOK, res);
974 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastBytesRxOK, res);
975 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastBytesRxOK, res);
976 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastBytesRxOK, res);
977 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxError, res);
978 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxOutOfBuf, res);
979 
980 #undef VMXNET3_REDUCE_SNAPSHOT_RX_STAT
981 }
982 
983 static void
984 vmxnet3_hw_stats_save(struct vmxnet3_hw *hw)
985 {
986 	unsigned int i;
987 
988 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
989 
990 	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
991 
992 	for (i = 0; i < hw->num_tx_queues; i++)
993 		vmxnet3_hw_tx_stats_get(hw, i, &hw->saved_tx_stats[i]);
994 	for (i = 0; i < hw->num_rx_queues; i++)
995 		vmxnet3_hw_rx_stats_get(hw, i, &hw->saved_rx_stats[i]);
996 }
997 
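/*
 * Report extended statistics names such as "rx_q0_drop_total" or
 * "tx_q1_tx_ring_full". If the caller's array is missing or too small,
 * only the required number of entries is returned.
 */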
998 static int
999 vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
1000 			     struct rte_eth_xstat_name *xstats_names,
1001 			     unsigned int n)
1002 {
1003 	unsigned int i, t, count = 0;
1004 	unsigned int nstats =
1005 		dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
1006 		dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
1007 
1008 	if (!xstats_names || n < nstats)
1009 		return nstats;
1010 
1011 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1012 		if (!dev->data->rx_queues[i])
1013 			continue;
1014 
1015 		for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
1016 			snprintf(xstats_names[count].name,
1017 				 sizeof(xstats_names[count].name),
1018 				 "rx_q%u_%s", i,
1019 				 vmxnet3_rxq_stat_strings[t].name);
1020 			count++;
1021 		}
1022 	}
1023 
1024 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1025 		if (!dev->data->tx_queues[i])
1026 			continue;
1027 
1028 		for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
1029 			snprintf(xstats_names[count].name,
1030 				 sizeof(xstats_names[count].name),
1031 				 "tx_q%u_%s", i,
1032 				 vmxnet3_txq_stat_strings[t].name);
1033 			count++;
1034 		}
1035 	}
1036 
1037 	return count;
1038 }
1039 
1040 static int
1041 vmxnet3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1042 		       unsigned int n)
1043 {
1044 	unsigned int i, t, count = 0;
1045 	unsigned int nstats =
1046 		dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
1047 		dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
1048 
1049 	if (n < nstats)
1050 		return nstats;
1051 
1052 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1053 		struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
1054 
1055 		if (rxq == NULL)
1056 			continue;
1057 
1058 		for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
1059 			xstats[count].value = *(uint64_t *)(((char *)&rxq->stats) +
1060 				vmxnet3_rxq_stat_strings[t].offset);
1061 			xstats[count].id = count;
1062 			count++;
1063 		}
1064 	}
1065 
1066 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1067 		struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
1068 
1069 		if (txq == NULL)
1070 			continue;
1071 
1072 		for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
1073 			xstats[count].value = *(uint64_t *)(((char *)&txq->stats) +
1074 				vmxnet3_txq_stat_strings[t].offset);
1075 			xstats[count].id = count;
1076 			count++;
1077 		}
1078 	}
1079 
1080 	return count;
1081 }
1082 
1083 static int
1084 vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1085 {
1086 	unsigned int i;
1087 	struct vmxnet3_hw *hw = dev->data->dev_private;
1088 	struct UPT1_TxStats txStats;
1089 	struct UPT1_RxStats rxStats;
1090 
1091 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
1092 
1093 	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
1094 	for (i = 0; i < hw->num_tx_queues; i++) {
1095 		vmxnet3_tx_stats_get(hw, i, &txStats);
1096 
1097 		stats->q_opackets[i] = txStats.ucastPktsTxOK +
1098 			txStats.mcastPktsTxOK +
1099 			txStats.bcastPktsTxOK;
1100 
1101 		stats->q_obytes[i] = txStats.ucastBytesTxOK +
1102 			txStats.mcastBytesTxOK +
1103 			txStats.bcastBytesTxOK;
1104 
1105 		stats->opackets += stats->q_opackets[i];
1106 		stats->obytes += stats->q_obytes[i];
1107 		stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
1108 	}
1109 
1110 	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
1111 	for (i = 0; i < hw->num_rx_queues; i++) {
1112 		vmxnet3_rx_stats_get(hw, i, &rxStats);
1113 
1114 		stats->q_ipackets[i] = rxStats.ucastPktsRxOK +
1115 			rxStats.mcastPktsRxOK +
1116 			rxStats.bcastPktsRxOK;
1117 
1118 		stats->q_ibytes[i] = rxStats.ucastBytesRxOK +
1119 			rxStats.mcastBytesRxOK +
1120 			rxStats.bcastBytesRxOK;
1121 
1122 		stats->ipackets += stats->q_ipackets[i];
1123 		stats->ibytes += stats->q_ibytes[i];
1124 
1125 		stats->q_errors[i] = rxStats.pktsRxError;
1126 		stats->ierrors += rxStats.pktsRxError;
1127 		stats->imissed += rxStats.pktsRxOutOfBuf;
1128 	}
1129 
1130 	return 0;
1131 }
1132 
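/*
 * Reset statistics by snapshotting the current cumulative counters;
 * subsequent stats_get calls report values relative to this snapshot.
 */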
1133 static int
1134 vmxnet3_dev_stats_reset(struct rte_eth_dev *dev)
1135 {
1136 	unsigned int i;
1137 	struct vmxnet3_hw *hw = dev->data->dev_private;
1138 	struct UPT1_TxStats txStats = {0};
1139 	struct UPT1_RxStats rxStats = {0};
1140 
1141 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
1142 
1143 	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
1144 
1145 	for (i = 0; i < hw->num_tx_queues; i++) {
1146 		vmxnet3_hw_tx_stats_get(hw, i, &txStats);
1147 		memcpy(&hw->snapshot_tx_stats[i], &txStats,
1148 			sizeof(hw->snapshot_tx_stats[0]));
1149 	}
1150 	for (i = 0; i < hw->num_rx_queues; i++) {
1151 		vmxnet3_hw_rx_stats_get(hw, i, &rxStats);
1152 		memcpy(&hw->snapshot_rx_stats[i], &rxStats,
1153 			sizeof(hw->snapshot_rx_stats[0]));
1154 	}
1155 
1156 	return 0;
1157 }
1158 
1159 static int
1160 vmxnet3_dev_info_get(struct rte_eth_dev *dev,
1161 		     struct rte_eth_dev_info *dev_info)
1162 {
1163 	struct vmxnet3_hw *hw = dev->data->dev_private;
1164 
1165 	dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
1166 	dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
1167 	dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
1168 	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
1169 	dev_info->min_mtu = VMXNET3_MIN_MTU;
1170 	dev_info->max_mtu = VMXNET3_MAX_MTU;
1171 	dev_info->speed_capa = ETH_LINK_SPEED_10G;
1172 	dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
1173 
1174 	dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
1175 
1176 	if (VMXNET3_VERSION_GE_4(hw)) {
1177 		dev_info->flow_type_rss_offloads |= VMXNET3_V4_RSS_MASK;
1178 	}
1179 
1180 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1181 		.nb_max = VMXNET3_RX_RING_MAX_SIZE,
1182 		.nb_min = VMXNET3_DEF_RX_RING_SIZE,
1183 		.nb_align = 1,
1184 	};
1185 
1186 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1187 		.nb_max = VMXNET3_TX_RING_MAX_SIZE,
1188 		.nb_min = VMXNET3_DEF_TX_RING_SIZE,
1189 		.nb_align = 1,
1190 		.nb_seg_max = VMXNET3_TX_MAX_SEG,
1191 		.nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
1192 	};
1193 
1194 	dev_info->rx_offload_capa = VMXNET3_RX_OFFLOAD_CAP;
1195 	dev_info->rx_queue_offload_capa = 0;
1196 	dev_info->tx_offload_capa = VMXNET3_TX_OFFLOAD_CAP;
1197 	dev_info->tx_queue_offload_capa = 0;
1198 
1199 	return 0;
1200 }
1201 
1202 static const uint32_t *
1203 vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1204 {
1205 	static const uint32_t ptypes[] = {
1206 		RTE_PTYPE_L3_IPV4_EXT,
1207 		RTE_PTYPE_L3_IPV4,
1208 		RTE_PTYPE_UNKNOWN
1209 	};
1210 
1211 	if (dev->rx_pkt_burst == vmxnet3_recv_pkts)
1212 		return ptypes;
1213 	return NULL;
1214 }
1215 
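/*
 * The new MTU is stored by the ethdev layer and written to the device
 * through the shared area on the next dev_start; here we only make sure
 * the port is stopped.
 */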
1216 static int
1217 vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, __rte_unused uint16_t mtu)
1218 {
1219 	if (dev->data->dev_started) {
1220 		PMD_DRV_LOG(ERR, "Port %d must be stopped to configure MTU",
1221 			    dev->data->port_id);
1222 		return -EBUSY;
1223 	}
1224 
1225 	return 0;
1226 }
1227 
1228 static int
1229 vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
1230 {
1231 	struct vmxnet3_hw *hw = dev->data->dev_private;
1232 
1233 	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)(hw->perm_addr));
1234 	vmxnet3_write_mac(hw, mac_addr->addr_bytes);
1235 	return 0;
1236 }
1237 
1238 /* Returns 0 if the link status changed, -1 if it did not change */
1239 static int
1240 __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
1241 			  __rte_unused int wait_to_complete)
1242 {
1243 	struct vmxnet3_hw *hw = dev->data->dev_private;
1244 	struct rte_eth_link link;
1245 	uint32_t ret;
1246 
1247 	memset(&link, 0, sizeof(link));
1248 
1249 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
1250 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
1251 
1252 	if (ret & 0x1)
1253 		link.link_status = ETH_LINK_UP;
1254 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
1255 	link.link_speed = ETH_SPEED_NUM_10G;
1256 	link.link_autoneg = ETH_LINK_FIXED;
1257 
1258 	return rte_eth_linkstatus_set(dev, &link);
1259 }
1260 
1261 static int
1262 vmxnet3_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1263 {
1264 	/* Link status doesn't change for stopped dev */
1265 	if (dev->data->dev_started == 0)
1266 		return -1;
1267 
1268 	return __vmxnet3_dev_link_update(dev, wait_to_complete);
1269 }
1270 
1271 /* Update rxMode through the Vmxnet3_DriverShared structure in the adapter */
1272 static void
1273 vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
1274 {
1275 	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
1276 
1277 	if (set)
1278 		rxConf->rxMode = rxConf->rxMode | feature;
1279 	else
1280 		rxConf->rxMode = rxConf->rxMode & (~feature);
1281 
1282 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
1283 }
1284 
1285 /* Promiscuous mode is supported only if Vmxnet3_DriverShared is initialized in the adapter */
1286 static int
1287 vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
1288 {
1289 	struct vmxnet3_hw *hw = dev->data->dev_private;
1290 	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
1291 
1292 	memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
1293 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
1294 
1295 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1296 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1297 
1298 	return 0;
1299 }
1300 
1301 /* Promiscuous mode is supported only if Vmxnet3_DriverShared is initialized in the adapter */
1302 static int
1303 vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
1304 {
1305 	struct vmxnet3_hw *hw = dev->data->dev_private;
1306 	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
1307 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
1308 
1309 	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
1310 		memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
1311 	else
1312 		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
1313 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
1314 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1315 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1316 
1317 	return 0;
1318 }
1319 
1320 /* Allmulticast mode is supported only if Vmxnet3_DriverShared is initialized in the adapter */
1321 static int
1322 vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
1323 {
1324 	struct vmxnet3_hw *hw = dev->data->dev_private;
1325 
1326 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
1327 
1328 	return 0;
1329 }
1330 
1331 /* Allmulticast mode is supported only if Vmxnet3_DriverShared is initialized in the adapter */
1332 static int
1333 vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
1334 {
1335 	struct vmxnet3_hw *hw = dev->data->dev_private;
1336 
1337 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
1338 
1339 	return 0;
1340 }
1341 
1342 /* Enable/disable filter on vlan */
1343 static int
1344 vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
1345 {
1346 	struct vmxnet3_hw *hw = dev->data->dev_private;
1347 	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
1348 	uint32_t *vf_table = rxConf->vfTable;
1349 
1350 	/* save state for restore */
1351 	if (on)
1352 		VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
1353 	else
1354 		VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);
1355 
1356 	/* don't change active filter if in promiscuous mode */
1357 	if (rxConf->rxMode & VMXNET3_RXM_PROMISC)
1358 		return 0;
1359 
1360 	/* set in hardware */
1361 	if (on)
1362 		VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
1363 	else
1364 		VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);
1365 
1366 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1367 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1368 	return 0;
1369 }
1370 
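/* Apply VLAN strip and/or VLAN filter offload changes to the device */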
1371 static int
1372 vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1373 {
1374 	struct vmxnet3_hw *hw = dev->data->dev_private;
1375 	Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
1376 	uint32_t *vf_table = devRead->rxFilterConf.vfTable;
1377 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
1378 
1379 	if (mask & ETH_VLAN_STRIP_MASK) {
1380 		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1381 			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
1382 		else
1383 			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
1384 
1385 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1386 				       VMXNET3_CMD_UPDATE_FEATURE);
1387 	}
1388 
1389 	if (mask & ETH_VLAN_FILTER_MASK) {
1390 		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
1391 			memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
1392 		else
1393 			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
1394 
1395 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1396 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1397 	}
1398 
1399 	return 0;
1400 }
1401 
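/*
 * Handle events signalled through the ECR register: link state change,
 * Tx/Rx queue errors, device implementation change and debug events.
 */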
1402 static void
1403 vmxnet3_process_events(struct rte_eth_dev *dev)
1404 {
1405 	struct vmxnet3_hw *hw = dev->data->dev_private;
1406 	uint32_t events = hw->shared->ecr;
1407 
1408 	if (!events)
1409 		return;
1410 
1411 	/*
1412 	 * ECR bits are cleared when written with 1b. Hence write the
1413 	 * events back to ECR so that the bits which were set are cleared.
1414 	 */
1415 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);
1416 
1417 	/* Check if link state has changed */
1418 	if (events & VMXNET3_ECR_LINK) {
1419 		PMD_DRV_LOG(DEBUG, "Process events: VMXNET3_ECR_LINK event");
1420 		if (vmxnet3_dev_link_update(dev, 0) == 0)
1421 			rte_eth_dev_callback_process(dev,
1422 						     RTE_ETH_EVENT_INTR_LSC,
1423 						     NULL);
1424 	}
1425 
1426 	/* Check if there is an error on xmit/recv queues */
1427 	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
1428 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1429 				       VMXNET3_CMD_GET_QUEUE_STATUS);
1430 
1431 		if (hw->tqd_start->status.stopped)
1432 			PMD_DRV_LOG(ERR, "tq error 0x%x",
1433 				    hw->tqd_start->status.error);
1434 
1435 		if (hw->rqd_start->status.stopped)
1436 			PMD_DRV_LOG(ERR, "rq error 0x%x",
1437 				     hw->rqd_start->status.error);
1438 
1439 		/* A device reset would be required to recover from a queue
1440 		 * error; this is not done here. */
1441 	}
1442 
1443 	if (events & VMXNET3_ECR_DIC)
1444 		PMD_DRV_LOG(DEBUG, "Device implementation change event.");
1445 
1446 	if (events & VMXNET3_ECR_DEBUG)
1447 		PMD_DRV_LOG(DEBUG, "Debug event generated by device.");
1448 }
1449 
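/* LSC interrupt handler: process pending device events and re-arm the interrupt */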
1450 static void
1451 vmxnet3_interrupt_handler(void *param)
1452 {
1453 	struct rte_eth_dev *dev = param;
1454 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
1455 
1456 	vmxnet3_process_events(dev);
1457 
1458 	if (rte_intr_ack(&pci_dev->intr_handle) < 0)
1459 		PMD_DRV_LOG(ERR, "interrupt enable failed");
1460 }
1461 
1462 RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
1463 RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
1464 RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");
1465 RTE_LOG_REGISTER(vmxnet3_logtype_init, pmd.net.vmxnet3.init, NOTICE);
1466 RTE_LOG_REGISTER(vmxnet3_logtype_driver, pmd.net.vmxnet3.driver, NOTICE);
1467