xref: /dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c (revision 3da59f30a23f2e795d2315f3d949e1b3e0ce0c3d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2015 Intel Corporation
3  */
4 
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdio.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <string.h>
11 #include <unistd.h>
12 #include <stdarg.h>
13 #include <fcntl.h>
14 #include <inttypes.h>
15 #include <rte_byteorder.h>
16 #include <rte_common.h>
17 #include <rte_cycles.h>
18 
19 #include <rte_interrupts.h>
20 #include <rte_log.h>
21 #include <rte_debug.h>
22 #include <rte_pci.h>
23 #include <bus_pci_driver.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_memory.h>
26 #include <rte_memzone.h>
27 #include <rte_eal.h>
28 #include <rte_alarm.h>
29 #include <rte_ether.h>
30 #include <ethdev_driver.h>
31 #include <ethdev_pci.h>
32 #include <rte_string_fns.h>
33 #include <rte_malloc.h>
34 #include <dev_driver.h>
35 
36 #include "base/vmxnet3_defs.h"
37 
38 #include "vmxnet3_ring.h"
39 #include "vmxnet3_logs.h"
40 #include "vmxnet3_ethdev.h"
41 
42 #define	VMXNET3_TX_MAX_SEG	UINT8_MAX
43 
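/* Tx/Rx offload capabilities advertised through vmxnet3_dev_info_get() below. */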
44 #define VMXNET3_TX_OFFLOAD_CAP		\
45 	(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |	\
46 	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |	\
47 	 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |	\
48 	 RTE_ETH_TX_OFFLOAD_TCP_TSO |	\
49 	 RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
50 
51 #define VMXNET3_RX_OFFLOAD_CAP		\
52 	(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |	\
53 	 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |   \
54 	 RTE_ETH_RX_OFFLOAD_SCATTER |	\
55 	 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |	\
56 	 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |	\
57 	 RTE_ETH_RX_OFFLOAD_TCP_LRO |	\
58 	 RTE_ETH_RX_OFFLOAD_RSS_HASH)
59 
60 int vmxnet3_segs_dynfield_offset = -1;
61 
62 static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
63 static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
64 static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
65 static int vmxnet3_dev_start(struct rte_eth_dev *dev);
66 static int vmxnet3_dev_stop(struct rte_eth_dev *dev);
67 static int vmxnet3_dev_close(struct rte_eth_dev *dev);
68 static int vmxnet3_dev_reset(struct rte_eth_dev *dev);
69 static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
70 static int vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
71 static int vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
72 static int vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
73 static int vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
74 static int __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
75 				     int wait_to_complete);
76 static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
77 				   int wait_to_complete);
78 static void vmxnet3_hw_stats_save(struct vmxnet3_hw *hw);
79 static int vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
80 				  struct rte_eth_stats *stats);
81 static int vmxnet3_dev_stats_reset(struct rte_eth_dev *dev);
82 static int vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
83 					struct rte_eth_xstat_name *xstats,
84 					unsigned int n);
85 static int vmxnet3_dev_xstats_get(struct rte_eth_dev *dev,
86 				  struct rte_eth_xstat *xstats, unsigned int n);
87 static int vmxnet3_dev_info_get(struct rte_eth_dev *dev,
88 				struct rte_eth_dev_info *dev_info);
89 static int vmxnet3_hw_ver_get(struct rte_eth_dev *dev,
90 			      char *fw_version, size_t fw_size);
91 static const uint32_t *
92 vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
93 static int vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
94 static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
95 				       uint16_t vid, int on);
96 static int vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
97 static int vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
98 				 struct rte_ether_addr *mac_addr);
99 static void vmxnet3_process_events(struct rte_eth_dev *dev);
100 static void vmxnet3_interrupt_handler(void *param);
101 static int
102 vmxnet3_rss_reta_update(struct rte_eth_dev *dev,
103 			struct rte_eth_rss_reta_entry64 *reta_conf,
104 			uint16_t reta_size);
105 static int
106 vmxnet3_rss_reta_query(struct rte_eth_dev *dev,
107 		       struct rte_eth_rss_reta_entry64 *reta_conf,
108 		       uint16_t reta_size);
109 
110 static int vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
111 						uint16_t queue_id);
112 static int vmxnet3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
113 						uint16_t queue_id);
114 
115 /*
116  * The set of PCI devices this driver supports
117  */
118 #define VMWARE_PCI_VENDOR_ID 0x15AD
119 #define VMWARE_DEV_ID_VMXNET3 0x07B0
120 static const struct rte_pci_id pci_id_vmxnet3_map[] = {
121 	{ RTE_PCI_DEVICE(VMWARE_PCI_VENDOR_ID, VMWARE_DEV_ID_VMXNET3) },
122 	{ .vendor_id = 0, /* sentinel */ },
123 };
124 
125 static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
126 	.dev_configure        = vmxnet3_dev_configure,
127 	.dev_start            = vmxnet3_dev_start,
128 	.dev_stop             = vmxnet3_dev_stop,
129 	.dev_close            = vmxnet3_dev_close,
130 	.dev_reset            = vmxnet3_dev_reset,
131 	.link_update          = vmxnet3_dev_link_update,
132 	.promiscuous_enable   = vmxnet3_dev_promiscuous_enable,
133 	.promiscuous_disable  = vmxnet3_dev_promiscuous_disable,
134 	.allmulticast_enable  = vmxnet3_dev_allmulticast_enable,
135 	.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
136 	.mac_addr_set         = vmxnet3_mac_addr_set,
137 	.mtu_set              = vmxnet3_dev_mtu_set,
138 	.stats_get            = vmxnet3_dev_stats_get,
139 	.stats_reset          = vmxnet3_dev_stats_reset,
140 	.xstats_get           = vmxnet3_dev_xstats_get,
141 	.xstats_get_names     = vmxnet3_dev_xstats_get_names,
142 	.dev_infos_get        = vmxnet3_dev_info_get,
143 	.fw_version_get       = vmxnet3_hw_ver_get,
144 	.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
145 	.vlan_filter_set      = vmxnet3_dev_vlan_filter_set,
146 	.vlan_offload_set     = vmxnet3_dev_vlan_offload_set,
147 	.rx_queue_setup       = vmxnet3_dev_rx_queue_setup,
148 	.rx_queue_release     = vmxnet3_dev_rx_queue_release,
149 	.rx_queue_intr_enable = vmxnet3_dev_rx_queue_intr_enable,
150 	.rx_queue_intr_disable = vmxnet3_dev_rx_queue_intr_disable,
151 	.tx_queue_setup       = vmxnet3_dev_tx_queue_setup,
152 	.tx_queue_release     = vmxnet3_dev_tx_queue_release,
153 	.reta_update          = vmxnet3_rss_reta_update,
154 	.reta_query           = vmxnet3_rss_reta_query,
155 };
156 
157 struct vmxnet3_xstats_name_off {
158 	char name[RTE_ETH_XSTATS_NAME_SIZE];
159 	unsigned int offset;
160 };
161 
162 /* tx_qX_ is prepended to the name string here */
163 static const struct vmxnet3_xstats_name_off vmxnet3_txq_stat_strings[] = {
164 	{"drop_total",         offsetof(struct vmxnet3_txq_stats, drop_total)},
165 	{"drop_too_many_segs", offsetof(struct vmxnet3_txq_stats, drop_too_many_segs)},
166 	{"drop_tso",           offsetof(struct vmxnet3_txq_stats, drop_tso)},
167 	{"tx_ring_full",       offsetof(struct vmxnet3_txq_stats, tx_ring_full)},
168 };
169 
170 /* rx_qX_ is prepended to the name string here */
171 static const struct vmxnet3_xstats_name_off vmxnet3_rxq_stat_strings[] = {
172 	{"drop_total",           offsetof(struct vmxnet3_rxq_stats, drop_total)},
173 	{"drop_err",             offsetof(struct vmxnet3_rxq_stats, drop_err)},
174 	{"drop_fcs",             offsetof(struct vmxnet3_rxq_stats, drop_fcs)},
175 	{"rx_buf_alloc_failure", offsetof(struct vmxnet3_rxq_stats, rx_buf_alloc_failure)},
176 };
177 
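/*
 * Reserve (or look up) an IOVA-contiguous memzone named "eth_p<port>_<suffix>".
 * With reuse set, an existing zone with that name is returned as-is; otherwise
 * any stale zone is freed first and a fresh one is reserved, so its size can
 * differ from a previous configuration. Used below for the "shared",
 * "queuedesc", "rss_conf" and "memRegs" zones.
 */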
178 static const struct rte_memzone *
179 gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
180 		 const char *post_string, int socket_id,
181 		 uint16_t align, bool reuse)
182 {
183 	char z_name[RTE_MEMZONE_NAMESIZE];
184 	const struct rte_memzone *mz;
185 
186 	snprintf(z_name, sizeof(z_name), "eth_p%d_%s",
187 			dev->data->port_id, post_string);
188 
189 	mz = rte_memzone_lookup(z_name);
190 	if (!reuse) {
191 		if (mz)
192 			rte_memzone_free(mz);
193 		return rte_memzone_reserve_aligned(z_name, size, socket_id,
194 				RTE_MEMZONE_IOVA_CONTIG, align);
195 	}
196 
197 	if (mz)
198 		return mz;
199 
200 	return rte_memzone_reserve_aligned(z_name, size, socket_id,
201 			RTE_MEMZONE_IOVA_CONTIG, align);
202 }
203 
204 /*
205  * Enable the given interrupt
206  */
207 static void
208 vmxnet3_enable_intr(struct vmxnet3_hw *hw, unsigned int intr_idx)
209 {
210 	PMD_INIT_FUNC_TRACE();
211 	VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + intr_idx * 8, 0);
212 }
213 
214 /*
215  * Disable the given interrupt
216  */
217 static void
218 vmxnet3_disable_intr(struct vmxnet3_hw *hw, unsigned int intr_idx)
219 {
220 	PMD_INIT_FUNC_TRACE();
221 	VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + intr_idx * 8, 1);
222 }
223 
224 /*
225  * Simple helper to get intrCtrl and eventIntrIdx based on config and hw version
226  */
227 static void
228 vmxnet3_get_intr_ctrl_ev(struct vmxnet3_hw *hw,
229 			 uint8 **out_eventIntrIdx,
230 			 uint32 **out_intrCtrl)
231 {
232 
233 	if (VMXNET3_VERSION_GE_6(hw) && hw->queuesExtEnabled) {
234 		*out_eventIntrIdx = &hw->shared->devReadExt.intrConfExt.eventIntrIdx;
235 		*out_intrCtrl = &hw->shared->devReadExt.intrConfExt.intrCtrl;
236 	} else {
237 		*out_eventIntrIdx = &hw->shared->devRead.intrConf.eventIntrIdx;
238 		*out_intrCtrl = &hw->shared->devRead.intrConf.intrCtrl;
239 	}
240 }
241 
242 /*
243  * Disable all intrs used by the device
244  */
245 static void
246 vmxnet3_disable_all_intrs(struct vmxnet3_hw *hw)
247 {
248 	int i;
249 	uint8 *eventIntrIdx;
250 	uint32 *intrCtrl;
251 
252 	PMD_INIT_FUNC_TRACE();
253 	vmxnet3_get_intr_ctrl_ev(hw, &eventIntrIdx, &intrCtrl);
254 
255 	*intrCtrl |= rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
256 
257 	for (i = 0; i < hw->intr.num_intrs; i++)
258 		vmxnet3_disable_intr(hw, i);
259 }
260 
261 /*
262  * Enable all intrs used by the device
263  */
264 static void
265 vmxnet3_enable_all_intrs(struct vmxnet3_hw *hw)
266 {
267 	uint8 *eventIntrIdx;
268 	uint32 *intrCtrl;
269 
270 	PMD_INIT_FUNC_TRACE();
271 	vmxnet3_get_intr_ctrl_ev(hw, &eventIntrIdx, &intrCtrl);
272 
273 	*intrCtrl &= rte_cpu_to_le_32(~VMXNET3_IC_DISABLE_ALL);
274 
275 	if (hw->intr.lsc_only) {
276 		vmxnet3_enable_intr(hw, *eventIntrIdx);
277 	} else {
278 		int i;
279 
280 		for (i = 0; i < hw->intr.num_intrs; i++)
281 			vmxnet3_enable_intr(hw, i);
282 	}
283 }
284 
285 /*
286  * Gets tx data ring descriptor size.
287  */
288 static uint16_t
289 eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
290 {
291 	uint16 txdata_desc_size;
292 
293 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
294 			       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
295 	txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
296 
297 	return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
298 		txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
299 		txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK) ?
300 		sizeof(struct Vmxnet3_TxDataDesc) : txdata_desc_size;
301 }
302 
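/*
 * Negotiate optional device capabilities (vmxnet3 v7 and later): read the
 * maximum capability set together with the DCR and PTCR registers, select the
 * capabilities to use (the PTCR set when UPTv2 is usable, otherwise the DCR
 * set without the large BAR), and write the selection back via VMXNET3_REG_DCR.
 */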
303 static int
304 eth_vmxnet3_setup_capabilities(struct vmxnet3_hw *hw,
305 			       struct rte_eth_dev *eth_dev)
306 {
307 	uint32_t dcr, ptcr, value;
308 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
309 
310 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
311 			       VMXNET3_CMD_GET_MAX_CAPABILITIES);
312 	value = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
313 	hw->max_capabilities[0] = value;
314 	dcr = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_DCR);
315 	hw->DCR_capabilities[0] = dcr;
316 	hw->used_DCR_capabilities[0] = 0;
317 	ptcr = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_PTCR);
318 	hw->PTCR_capabilities[0] = ptcr;
319 	hw->used_PTCR_capabilities[0] = 0;
320 
321 	if (hw->uptv2_enabled && !(ptcr & (1 << VMXNET3_DCR_ERROR))) {
322 		PMD_DRV_LOG(NOTICE, "UPTv2 enabled");
323 		hw->used_PTCR_capabilities[0] = ptcr;
324 	} else {
325 		/* Use all DCR capabilities, but disable large bar */
326 		hw->used_DCR_capabilities[0] = dcr &
327 					(~(1UL << VMXNET3_CAP_LARGE_BAR));
328 		PMD_DRV_LOG(NOTICE, "UPTv2 disabled");
329 	}
330 	if (hw->DCR_capabilities[0] & (1UL << VMXNET3_CAP_OOORX_COMP) &&
331 	    hw->PTCR_capabilities[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) {
332 		if (hw->uptv2_enabled) {
333 			hw->used_PTCR_capabilities[0] |=
334 				(1UL << VMXNET3_CAP_OOORX_COMP);
335 		}
336 	}
337 	if (hw->used_PTCR_capabilities[0]) {
338 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DCR,
339 				       hw->used_PTCR_capabilities[0]);
340 	} else if (hw->used_DCR_capabilities[0]) {
341 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DCR,
342 				       hw->used_DCR_capabilities[0]);
343 	}
344 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
345 	dcr = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
346 	hw->used_DCR_capabilities[0] = dcr;
347 	PMD_DRV_LOG(DEBUG, "Dev " PCI_PRI_FMT ", vmxnet3 v%d, UPT enabled: %s, "
348 		    "DCR0=0x%08x, used DCR=0x%08x, "
349 		    "PTCR=0x%08x, used PTCR=0x%08x",
350 		    pci_dev->addr.domain, pci_dev->addr.bus,
351 		    pci_dev->addr.devid, pci_dev->addr.function, hw->version,
352 		    hw->uptv2_enabled ? "true" : "false",
353 		    hw->DCR_capabilities[0], hw->used_DCR_capabilities[0],
354 		    hw->PTCR_capabilities[0], hw->used_PTCR_capabilities[0]);
355 	return 0;
356 }
357 
358 /*
359  * Per-device initialization, called at PCI probe time. It returns 0 on success.
360  */
361 static int
362 eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
363 {
364 	struct rte_pci_device *pci_dev;
365 	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
366 	uint32_t mac_hi, mac_lo, ver;
367 	struct rte_eth_link link;
368 	static const struct rte_mbuf_dynfield vmxnet3_segs_dynfield_desc = {
369 		.name = VMXNET3_SEGS_DYNFIELD_NAME,
370 		.size = sizeof(vmxnet3_segs_dynfield_t),
371 		.align = alignof(vmxnet3_segs_dynfield_t),
372 	};
373 
374 	PMD_INIT_FUNC_TRACE();
375 
376 	eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
377 	eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
378 	eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
379 	eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
380 	eth_dev->rx_queue_count = vmxnet3_dev_rx_queue_count;
381 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
382 
383 	/* extra mbuf field is required to guess MSS */
384 	vmxnet3_segs_dynfield_offset =
385 		rte_mbuf_dynfield_register(&vmxnet3_segs_dynfield_desc);
386 	if (vmxnet3_segs_dynfield_offset < 0) {
387 		PMD_INIT_LOG(ERR, "Cannot register mbuf field.");
388 		return -rte_errno;
389 	}
390 
391 	/*
392 	 * for secondary processes, we don't initialize any further as primary
393 	 * has already done this work.
394 	 */
395 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
396 		return 0;
397 
398 	rte_eth_copy_pci_info(eth_dev, pci_dev);
399 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
400 
401 	/* Vendor and Device ID need to be set before init of shared code */
402 	hw->device_id = pci_dev->id.device_id;
403 	hw->vendor_id = pci_dev->id.vendor_id;
404 	hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
405 	hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;
406 
407 	hw->num_rx_queues = 1;
408 	hw->num_tx_queues = 1;
409 	hw->bufs_per_pkt = 1;
410 
411 	/* Check h/w version compatibility with driver. */
412 	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
413 
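	/*
	 * VMXNET3_REG_VRRS advertises the supported device revisions as a bit
	 * mask. Pick the highest revision the driver knows about, acknowledge
	 * it by writing that single bit back, and record the resulting version
	 * in hw->version.
	 */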
414 	if (ver & (1 << VMXNET3_REV_7)) {
415 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
416 				       1 << VMXNET3_REV_7);
417 		hw->version = VMXNET3_REV_7 + 1;
418 	} else if (ver & (1 << VMXNET3_REV_6)) {
419 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
420 				       1 << VMXNET3_REV_6);
421 		hw->version = VMXNET3_REV_6 + 1;
422 	} else if (ver & (1 << VMXNET3_REV_5)) {
423 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
424 				       1 << VMXNET3_REV_5);
425 		hw->version = VMXNET3_REV_5 + 1;
426 	} else if (ver & (1 << VMXNET3_REV_4)) {
427 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
428 				       1 << VMXNET3_REV_4);
429 		hw->version = VMXNET3_REV_4 + 1;
430 	} else if (ver & (1 << VMXNET3_REV_3)) {
431 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
432 				       1 << VMXNET3_REV_3);
433 		hw->version = VMXNET3_REV_3 + 1;
434 	} else if (ver & (1 << VMXNET3_REV_2)) {
435 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
436 				       1 << VMXNET3_REV_2);
437 		hw->version = VMXNET3_REV_2 + 1;
438 	} else if (ver & (1 << VMXNET3_REV_1)) {
439 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
440 				       1 << VMXNET3_REV_1);
441 		hw->version = VMXNET3_REV_1 + 1;
442 	} else {
443 		PMD_INIT_LOG(ERR, "Incompatible hardware version: %d", ver);
444 		return -EIO;
445 	}
446 
447 	PMD_INIT_LOG(INFO, "Using device v%d", hw->version);
448 
449 	/* Check UPT version compatibility with driver. */
450 	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
451 	PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
452 	if (ver & 0x1)
453 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
454 	else {
455 		PMD_INIT_LOG(ERR, "Incompatible UPT version.");
456 		return -EIO;
457 	}
458 
459 	if (VMXNET3_VERSION_GE_7(hw)) {
460 		/* start with UPTv2 enabled to avoid ESXi issues */
461 		hw->uptv2_enabled = TRUE;
462 		eth_vmxnet3_setup_capabilities(hw, eth_dev);
463 	}
464 
465 	if (hw->used_DCR_capabilities[0] & (1 << VMXNET3_CAP_LARGE_BAR)) {
466 		hw->tx_prod_offset = VMXNET3_REG_LB_TXPROD;
467 		hw->rx_prod_offset[0] = VMXNET3_REG_LB_RXPROD;
468 		hw->rx_prod_offset[1] = VMXNET3_REG_LB_RXPROD2;
469 	} else {
470 		hw->tx_prod_offset = VMXNET3_REG_TXPROD;
471 		hw->rx_prod_offset[0] = VMXNET3_REG_RXPROD;
472 		hw->rx_prod_offset[1] = VMXNET3_REG_RXPROD2;
473 	}
474 
475 	/* Getting MAC Address */
476 	mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
477 	mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
478 	memcpy(hw->perm_addr, &mac_lo, 4);
479 	memcpy(hw->perm_addr + 4, &mac_hi, 2);
480 
481 	/* Allocate memory for storing MAC addresses */
482 	eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", RTE_ETHER_ADDR_LEN *
483 					       VMXNET3_MAX_MAC_ADDRS, 0);
484 	if (eth_dev->data->mac_addrs == NULL) {
485 		PMD_INIT_LOG(ERR,
486 			     "Failed to allocate %d bytes needed to store MAC addresses",
487 			     RTE_ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
488 		return -ENOMEM;
489 	}
490 	/* Copy the permanent MAC address */
491 	rte_ether_addr_copy((struct rte_ether_addr *)hw->perm_addr,
492 			&eth_dev->data->mac_addrs[0]);
493 
494 	PMD_INIT_LOG(DEBUG, "MAC Address : " RTE_ETHER_ADDR_PRT_FMT,
495 		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
496 		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
497 
498 	/* Put device in Quiesce Mode */
499 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
500 
501 	/* allow untagged pkts */
502 	VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);
503 
504 	hw->txdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
505 		eth_vmxnet3_txdata_get(hw) : sizeof(struct Vmxnet3_TxDataDesc);
506 
507 	hw->rxdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
508 		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
509 	RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
510 		   hw->rxdata_desc_size);
511 
512 	/* clear shadow stats */
513 	memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
514 	memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));
515 
516 	/* clear snapshot stats */
517 	memset(hw->snapshot_tx_stats, 0, sizeof(hw->snapshot_tx_stats));
518 	memset(hw->snapshot_rx_stats, 0, sizeof(hw->snapshot_rx_stats));
519 
520 	/* set the initial link status */
521 	memset(&link, 0, sizeof(link));
522 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
523 	link.link_speed = RTE_ETH_SPEED_NUM_10G;
524 	link.link_autoneg = RTE_ETH_LINK_FIXED;
525 	rte_eth_linkstatus_set(eth_dev, &link);
526 
527 	return 0;
528 }
529 
530 static int
531 eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
532 {
533 	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
534 
535 	PMD_INIT_FUNC_TRACE();
536 
537 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
538 		return 0;
539 
540 	if (hw->adapter_stopped == 0) {
541 		PMD_INIT_LOG(DEBUG, "Device has not been closed.");
542 		return -EBUSY;
543 	}
544 
545 	return 0;
546 }
547 
548 static int eth_vmxnet3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
549 	struct rte_pci_device *pci_dev)
550 {
551 	return rte_eth_dev_pci_generic_probe(pci_dev,
552 		sizeof(struct vmxnet3_hw), eth_vmxnet3_dev_init);
553 }
554 
555 static int eth_vmxnet3_pci_remove(struct rte_pci_device *pci_dev)
556 {
557 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_vmxnet3_dev_uninit);
558 }
559 
560 static struct rte_pci_driver rte_vmxnet3_pmd = {
561 	.id_table = pci_id_vmxnet3_map,
562 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
563 	.probe = eth_vmxnet3_pci_probe,
564 	.remove = eth_vmxnet3_pci_remove,
565 };
566 
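/*
 * Read the interrupt configuration (type and mask mode) from the device.
 * MSI-X is preferred, with one vector per Rx queue plus one for link events;
 * if that is not possible (e.g. unequal Tx/Rx queue counts), fall back to two
 * vectors with only the link-state interrupt in use.
 */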
567 static void
568 vmxnet3_alloc_intr_resources(struct rte_eth_dev *dev)
569 {
570 	struct vmxnet3_hw *hw = dev->data->dev_private;
571 	uint32_t cfg;
572 	int nvec = 1; /* for link event */
573 
574 	/* intr settings */
575 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
576 			       VMXNET3_CMD_GET_CONF_INTR);
577 	cfg = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
578 	hw->intr.type = cfg & 0x3;
579 	hw->intr.mask_mode = (cfg >> 2) & 0x3;
580 
581 	if (hw->intr.type == VMXNET3_IT_AUTO)
582 		hw->intr.type = VMXNET3_IT_MSIX;
583 
584 	if (hw->intr.type == VMXNET3_IT_MSIX) {
585 		/* only support shared tx/rx intr */
586 		if (hw->num_tx_queues != hw->num_rx_queues)
587 			goto msix_err;
588 
589 		nvec += hw->num_rx_queues;
590 		hw->intr.num_intrs = nvec;
591 		return;
592 	}
593 
594 msix_err:
595 	/* the tx/rx queue interrupt will be disabled */
596 	hw->intr.num_intrs = 2;
597 	hw->intr.lsc_only = TRUE;
598 	PMD_INIT_LOG(INFO, "Enabled MSI-X with %d vectors", hw->intr.num_intrs);
599 }
600 
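/*
 * dev_configure: validate the requested queue counts, allocate the
 * IOVA-contiguous memzones for the driver-shared area, the Tx/Rx queue
 * descriptors and (when RSS is requested) the RSS configuration, and work out
 * how many interrupt vectors will be needed.
 */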
601 static int
602 vmxnet3_dev_configure(struct rte_eth_dev *dev)
603 {
604 	const struct rte_memzone *mz;
605 	struct vmxnet3_hw *hw = dev->data->dev_private;
606 	size_t size;
607 
608 	PMD_INIT_FUNC_TRACE();
609 
610 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
611 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
612 
613 	if (!VMXNET3_VERSION_GE_6(hw)) {
614 		if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
615 			PMD_INIT_LOG(ERR,
616 				     "ERROR: Number of rx queues not power of 2");
617 			return -EINVAL;
618 		}
619 	}
620 
621 	/* At this point, the number of queues requested has already
622 	 * been validated against the dev_info max queue counts by the ethdev layer
623 	 */
624 	if (dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES ||
625 	    dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES) {
626 		hw->queuesExtEnabled = 1;
627 	} else {
628 		hw->queuesExtEnabled = 0;
629 	}
630 
631 	size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
632 		dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc);
633 
634 	if (size > UINT16_MAX)
635 		return -EINVAL;
636 
637 	hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
638 	hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;
639 
640 	/*
641 	 * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
642 	 * on current socket
643 	 */
644 	mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
645 			      "shared", rte_socket_id(), 8, 1);
646 
647 	if (mz == NULL) {
648 		PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
649 		return -ENOMEM;
650 	}
651 	memset(mz->addr, 0, mz->len);
652 
653 	hw->shared = mz->addr;
654 	hw->sharedPA = mz->iova;
655 
656 	/*
657 	 * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
658 	 * on current socket.
659 	 *
660 	 * We cannot reuse this memzone from previous allocation as its size
661 	 * depends on the number of tx and rx queues, which could be different
662 	 * from one config to another.
663 	 */
664 	mz = gpa_zone_reserve(dev, size, "queuedesc", rte_socket_id(),
665 			      VMXNET3_QUEUE_DESC_ALIGN, 0);
666 	if (mz == NULL) {
667 		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
668 		return -ENOMEM;
669 	}
670 	memset(mz->addr, 0, mz->len);
671 
672 	hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
673 	hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);
674 
675 	hw->queueDescPA = mz->iova;
676 	hw->queue_desc_len = (uint16_t)size;
677 
678 	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
679 		/* Allocate memory structure for UPT1_RSSConf and configure */
680 		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
681 				      "rss_conf", rte_socket_id(),
682 				      RTE_CACHE_LINE_SIZE, 1);
683 		if (mz == NULL) {
684 			PMD_INIT_LOG(ERR,
685 				     "ERROR: Creating rss_conf structure zone");
686 			return -ENOMEM;
687 		}
688 		memset(mz->addr, 0, mz->len);
689 
690 		hw->rss_conf = mz->addr;
691 		hw->rss_confPA = mz->iova;
692 	}
693 
694 	vmxnet3_alloc_intr_resources(dev);
695 
696 	return 0;
697 }
698 
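/*
 * Program the unicast MAC address: the low four bytes go to VMXNET3_REG_MACL
 * and the remaining two bytes to VMXNET3_REG_MACH.
 */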
699 static void
700 vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
701 {
702 	uint32_t val;
703 
704 	PMD_INIT_LOG(DEBUG,
705 		     "Writing MAC Address : " RTE_ETHER_ADDR_PRT_FMT,
706 		     addr[0], addr[1], addr[2],
707 		     addr[3], addr[4], addr[5]);
708 
709 	memcpy(&val, addr, 4);
710 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);
711 
712 	memcpy(&val, addr + 4, 2);
713 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
714 }
715 
716 /*
717  * Configure the hardware to generate MSI-X interrupts.
718  * If setting up MSI-X fails, the driver falls back to a single link-state
719  * (lsc) interrupt and the per-queue Rx/Tx interrupts are left disabled.
720  *
721  * Returns 0 on success and -1 otherwise.
722  */
723 static int
724 vmxnet3_configure_msix(struct rte_eth_dev *dev)
725 {
726 	struct vmxnet3_hw *hw = dev->data->dev_private;
727 	struct rte_intr_handle *intr_handle = dev->intr_handle;
728 	uint16_t intr_vector;
729 	int i;
730 
731 	hw->intr.event_intr_idx = 0;
732 
733 	/* only vfio-pci driver can support interrupt mode. */
734 	if (!rte_intr_cap_multiple(intr_handle) ||
735 	    dev->data->dev_conf.intr_conf.rxq == 0)
736 		return -1;
737 
738 	intr_vector = dev->data->nb_rx_queues;
739 	if (intr_vector > MAX_RX_QUEUES(hw)) {
740 		PMD_INIT_LOG(ERR, "At most %d intr queues supported",
741 			     MAX_RX_QUEUES(hw));
742 		return -ENOTSUP;
743 	}
744 
745 	if (rte_intr_efd_enable(intr_handle, intr_vector)) {
746 		PMD_INIT_LOG(ERR, "Failed to enable fastpath event fd");
747 		return -1;
748 	}
749 
750 	if (rte_intr_dp_is_en(intr_handle)) {
751 		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
752 						   dev->data->nb_rx_queues)) {
753 			PMD_INIT_LOG(ERR, "Failed to allocate %d Rx queues intr_vec",
754 					dev->data->nb_rx_queues);
755 			rte_intr_efd_disable(intr_handle);
756 			return -ENOMEM;
757 		}
758 	}
759 
760 	if (!rte_intr_allow_others(intr_handle) &&
761 	    dev->data->dev_conf.intr_conf.lsc != 0) {
762 		PMD_INIT_LOG(ERR, "not enough intr vector to support both Rx interrupt and LSC");
763 		rte_intr_vec_list_free(intr_handle);
764 		rte_intr_efd_disable(intr_handle);
765 		return -1;
766 	}
767 
768 	/* if we cannot allocate one MSI-X vector per queue, don't enable
769 	 * interrupt mode.
770 	 */
771 	if (hw->intr.num_intrs !=
772 				(rte_intr_nb_efd_get(intr_handle) + 1)) {
773 		PMD_INIT_LOG(ERR, "Device configured with %d Rx intr vectors, expecting %d",
774 				hw->intr.num_intrs,
775 				rte_intr_nb_efd_get(intr_handle) + 1);
776 		rte_intr_vec_list_free(intr_handle);
777 		rte_intr_efd_disable(intr_handle);
778 		return -1;
779 	}
780 
781 	for (i = 0; i < dev->data->nb_rx_queues; i++)
782 		if (rte_intr_vec_list_index_set(intr_handle, i, i + 1))
783 			return -rte_errno;
784 
785 	for (i = 0; i < hw->intr.num_intrs; i++)
786 		hw->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
787 
788 	PMD_INIT_LOG(INFO, "intr type %u, mode %u, %u vectors allocated",
789 		    hw->intr.type, hw->intr.mask_mode, hw->intr.num_intrs);
790 
791 	return 0;
792 }
793 
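/*
 * Describe the Rx mempools to the device as Vmxnet3_MemRegs. Each region
 * records the start IOVA and length of a mempool's first memory chunk along
 * with a bitmask of the queues using it; a mempool shared by several queues
 * is reported only once.
 */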
794 static int
795 vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
796 {
797 	struct vmxnet3_hw *hw = dev->data->dev_private;
798 	Vmxnet3_DriverShared *shared = hw->shared;
799 	Vmxnet3_CmdInfo *cmdInfo;
800 	struct rte_mempool *mp[VMXNET3_MAX_RX_QUEUES];
801 	uint8_t index[VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES];
802 	uint32_t num, i, j, size;
803 
804 	if (hw->memRegsPA == 0) {
805 		const struct rte_memzone *mz;
806 
807 		size = sizeof(Vmxnet3_MemRegs) +
808 			(VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES) *
809 			sizeof(Vmxnet3_MemoryRegion);
810 
811 		mz = gpa_zone_reserve(dev, size, "memRegs", rte_socket_id(), 8,
812 				      1);
813 		if (mz == NULL) {
814 			PMD_INIT_LOG(ERR, "ERROR: Creating memRegs zone");
815 			return -ENOMEM;
816 		}
817 		memset(mz->addr, 0, mz->len);
818 		hw->memRegs = mz->addr;
819 		hw->memRegsPA = mz->iova;
820 	}
821 
822 	num = hw->num_rx_queues;
823 
824 	for (i = 0; i < num; i++) {
825 		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
826 
827 		mp[i] = rxq->mp;
828 		index[i] = 1 << i;
829 	}
830 
831 	/*
832 	 * The same mempool could be used by multiple queues. In such a case,
833 	 * remove duplicate mempool entries. Only one entry is kept, with a
834 	 * bitmask indicating which queues are using this mempool.
835 	 */
836 	for (i = 1; i < num; i++) {
837 		for (j = 0; j < i; j++) {
838 			if (mp[i] == mp[j]) {
839 				mp[i] = NULL;
840 				index[j] |= 1 << i;
841 				break;
842 			}
843 		}
844 	}
845 
846 	j = 0;
847 	for (i = 0; i < num; i++) {
848 		if (mp[i] == NULL)
849 			continue;
850 
851 		Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];
852 
853 		mr->startPA =
854 			(uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->iova;
855 		mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
856 			STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
857 		mr->txQueueBits = index[i];
858 		mr->rxQueueBits = index[i];
859 
860 		PMD_INIT_LOG(INFO,
861 			     "index: %u startPA: %" PRIu64 " length: %u, "
862 			     "rxBits: %x",
863 			     j, mr->startPA, mr->length, mr->rxQueueBits);
864 		j++;
865 	}
866 	hw->memRegs->numRegs = j;
867 	PMD_INIT_LOG(INFO, "numRegs: %u", j);
868 
869 	size = sizeof(Vmxnet3_MemRegs) +
870 		(j - 1) * sizeof(Vmxnet3_MemoryRegion);
871 
872 	cmdInfo = &shared->cu.cmdInfo;
873 	cmdInfo->varConf.confVer = 1;
874 	cmdInfo->varConf.confLen = size;
875 	cmdInfo->varConf.confPA = hw->memRegsPA;
876 
877 	return 0;
878 }
879 
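/*
 * Fill in the Vmxnet3_DriverShared area that the device reads on
 * VMXNET3_CMD_ACTIVATE_DEV: guest/driver info, MTU, per-queue ring addresses
 * and sizes, interrupt moderation settings, the requested uptFeatures
 * (Rx checksum, LRO, RSS), the VLAN configuration and the MAC address.
 */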
880 static int
881 vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
882 {
883 	struct rte_eth_conf port_conf = dev->data->dev_conf;
884 	struct vmxnet3_hw *hw = dev->data->dev_private;
885 	struct rte_intr_handle *intr_handle = dev->intr_handle;
886 	uint32_t mtu = dev->data->mtu;
887 	Vmxnet3_DriverShared *shared = hw->shared;
888 	Vmxnet3_DSDevRead *devRead = &shared->devRead;
889 	struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
890 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
891 	uint32_t i;
892 	int ret;
893 
894 	hw->mtu = mtu;
895 
896 	shared->magic = VMXNET3_REV1_MAGIC;
897 	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
898 
899 	/* Setting up Guest OS information */
900 	devRead->misc.driverInfo.gos.gosBits   = sizeof(void *) == 4 ?
901 		VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64;
902 	devRead->misc.driverInfo.gos.gosType   = VMXNET3_GOS_TYPE_LINUX;
903 	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
904 	devRead->misc.driverInfo.uptVerSpt     = 1;
905 
906 	devRead->misc.mtu = rte_le_to_cpu_32(mtu);
907 	devRead->misc.queueDescPA  = hw->queueDescPA;
908 	devRead->misc.queueDescLen = hw->queue_desc_len;
909 	devRead->misc.numTxQueues  = hw->num_tx_queues;
910 	devRead->misc.numRxQueues  = hw->num_rx_queues;
911 
912 	for (i = 0; i < hw->num_tx_queues; i++) {
913 		Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
914 		vmxnet3_tx_queue_t *txq  = dev->data->tx_queues[i];
915 
916 		txq->shared = &hw->tqd_start[i];
917 
918 		tqd->ctrl.txNumDeferred  = 0;
919 		tqd->ctrl.txThreshold    = 1;
920 		tqd->conf.txRingBasePA   = txq->cmd_ring.basePA;
921 		tqd->conf.compRingBasePA = txq->comp_ring.basePA;
922 		tqd->conf.dataRingBasePA = txq->data_ring.basePA;
923 
924 		tqd->conf.txRingSize   = txq->cmd_ring.size;
925 		tqd->conf.compRingSize = txq->comp_ring.size;
926 		tqd->conf.dataRingSize = txq->data_ring.size;
927 		tqd->conf.txDataRingDescSize = txq->txdata_desc_size;
928 
929 		if (hw->intr.lsc_only)
930 			tqd->conf.intrIdx = 1;
931 		else
932 			tqd->conf.intrIdx =
933 				rte_intr_vec_list_index_get(intr_handle,
934 								   i);
935 		tqd->status.stopped = TRUE;
936 		tqd->status.error   = 0;
937 		memset(&tqd->stats, 0, sizeof(tqd->stats));
938 	}
939 
940 	for (i = 0; i < hw->num_rx_queues; i++) {
941 		Vmxnet3_RxQueueDesc *rqd  = &hw->rqd_start[i];
942 		vmxnet3_rx_queue_t *rxq   = dev->data->rx_queues[i];
943 
944 		rxq->shared = &hw->rqd_start[i];
945 
946 		rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
947 		rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
948 		rqd->conf.compRingBasePA  = rxq->comp_ring.basePA;
949 
950 		rqd->conf.rxRingSize[0]   = rxq->cmd_ring[0].size;
951 		rqd->conf.rxRingSize[1]   = rxq->cmd_ring[1].size;
952 		rqd->conf.compRingSize    = rxq->comp_ring.size;
953 
954 		if (VMXNET3_VERSION_GE_3(hw)) {
955 			rqd->conf.rxDataRingBasePA = rxq->data_ring.basePA;
956 			rqd->conf.rxDataRingDescSize = rxq->data_desc_size;
957 		}
958 
959 		if (hw->intr.lsc_only)
960 			rqd->conf.intrIdx = 1;
961 		else
962 			rqd->conf.intrIdx =
963 				rte_intr_vec_list_index_get(intr_handle,
964 								   i);
965 		rqd->status.stopped = TRUE;
966 		rqd->status.error   = 0;
967 		memset(&rqd->stats, 0, sizeof(rqd->stats));
968 	}
969 
970 	/* intr settings */
971 	if (VMXNET3_VERSION_GE_6(hw) && hw->queuesExtEnabled) {
972 		devReadExt->intrConfExt.autoMask = hw->intr.mask_mode ==
973 						   VMXNET3_IMM_AUTO;
974 		devReadExt->intrConfExt.numIntrs = hw->intr.num_intrs;
975 		for (i = 0; i < hw->intr.num_intrs; i++)
976 			devReadExt->intrConfExt.modLevels[i] =
977 				hw->intr.mod_levels[i];
978 
979 		devReadExt->intrConfExt.eventIntrIdx = hw->intr.event_intr_idx;
980 		devReadExt->intrConfExt.intrCtrl |=
981 			rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
982 	} else {
983 		devRead->intrConf.autoMask = hw->intr.mask_mode ==
984 					     VMXNET3_IMM_AUTO;
985 		devRead->intrConf.numIntrs = hw->intr.num_intrs;
986 		for (i = 0; i < hw->intr.num_intrs; i++)
987 			devRead->intrConf.modLevels[i] = hw->intr.mod_levels[i];
988 
989 		devRead->intrConf.eventIntrIdx = hw->intr.event_intr_idx;
990 		devRead->intrConf.intrCtrl |= rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
991 	}
992 
993 	/* Start with rxMode cleared: no VMXNET3_RXM_xxx bits set yet */
994 	devRead->rxFilterConf.rxMode = 0;
995 
996 	/* Setting up feature flags */
997 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
998 		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
999 
1000 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
1001 		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
1002 		devRead->misc.maxNumRxSG = 0;
1003 	}
1004 
1005 	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
1006 		ret = vmxnet3_rss_configure(dev);
1007 		if (ret != VMXNET3_SUCCESS)
1008 			return ret;
1009 
1010 		devRead->misc.uptFeatures |= VMXNET3_F_RSS;
1011 		devRead->rssConfDesc.confVer = 1;
1012 		devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
1013 		devRead->rssConfDesc.confPA  = hw->rss_confPA;
1014 	}
1015 
1016 	ret = vmxnet3_dev_vlan_offload_set(dev,
1017 			RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
1018 	if (ret)
1019 		return ret;
1020 
1021 	vmxnet3_write_mac(hw, dev->data->mac_addrs->addr_bytes);
1022 
1023 	return VMXNET3_SUCCESS;
1024 }
1025 
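/*
 * For vmxnet3 v7 and later, report the Rx ring buffer sizes to the device
 * via VMXNET3_CMD_SET_RING_BUFFER_SIZE.
 */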
1026 static void
1027 vmxnet3_init_bufsize(struct vmxnet3_hw *hw)
1028 {
1029 	struct Vmxnet3_DriverShared *shared = hw->shared;
1030 	union Vmxnet3_CmdInfo *cmd_info = &shared->cu.cmdInfo;
1031 
1032 	if (!VMXNET3_VERSION_GE_7(hw))
1033 		return;
1034 
1035 	cmd_info->ringBufSize.ring1BufSizeType0 = hw->rxdata_buf_size;
1036 	cmd_info->ringBufSize.ring1BufSizeType1 = 0;
1037 	cmd_info->ringBufSize.ring2BufSizeType1 = hw->rxdata_buf_size;
1038 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1039 			       VMXNET3_CMD_SET_RING_BUFFER_SIZE);
1040 }
1041 
1042 /*
1043  * Configure device link speed and set up the link.
1044  * Must be called after eth_vmxnet3_dev_init, otherwise it might fail.
1045  * It returns 0 on success.
1046  */
1047 static int
1048 vmxnet3_dev_start(struct rte_eth_dev *dev)
1049 {
1050 	int ret;
1051 	struct vmxnet3_hw *hw = dev->data->dev_private;
1052 	uint16_t i;
1053 
1054 	PMD_INIT_FUNC_TRACE();
1055 
1056 	/* Save stats before it is reset by CMD_ACTIVATE */
1057 	vmxnet3_hw_stats_save(hw);
1058 
1059 	/* configure MSI-X */
1060 	ret = vmxnet3_configure_msix(dev);
1061 	if (ret < 0) {
1062 		/* revert to lsc only */
1063 		hw->intr.num_intrs = 2;
1064 		hw->intr.lsc_only = TRUE;
1065 	}
1066 
1067 	ret = vmxnet3_setup_driver_shared(dev);
1068 	if (ret != VMXNET3_SUCCESS)
1069 		return ret;
1070 
1071 	/* Exchange shared data with device */
1072 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
1073 			       VMXNET3_GET_ADDR_LO(hw->sharedPA));
1074 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
1075 			       VMXNET3_GET_ADDR_HI(hw->sharedPA));
1076 
1077 	/* Activate device by register write */
1078 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
1079 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
1080 
1081 	if (ret != 0) {
1082 		PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
1083 		return -EINVAL;
1084 	}
1085 
1086 	/* Check memregs restrictions first */
1087 	if (dev->data->nb_rx_queues <= VMXNET3_MAX_RX_QUEUES &&
1088 	    dev->data->nb_tx_queues <= VMXNET3_MAX_TX_QUEUES) {
1089 		ret = vmxnet3_dev_setup_memreg(dev);
1090 		if (ret == 0) {
1091 			VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1092 					VMXNET3_CMD_REGISTER_MEMREGS);
1093 			ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
1094 			if (ret != 0)
1095 				PMD_INIT_LOG(DEBUG,
1096 					"Failed in setup memory region cmd\n");
1097 			ret = 0;
1098 		} else {
1099 			PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
1100 		}
1101 	} else {
1102 		PMD_INIT_LOG(WARNING, "Memregs can't init (rx: %d, tx: %d)",
1103 			     dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1104 	}
1105 
1106 	if (VMXNET3_VERSION_GE_4(hw) &&
1107 	    dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
1108 		/* Check for additional RSS  */
1109 		ret = vmxnet3_v4_rss_configure(dev);
1110 		if (ret != VMXNET3_SUCCESS) {
1111 			PMD_INIT_LOG(ERR, "Failed to configure v4 RSS");
1112 			return ret;
1113 		}
1114 	}
1115 
1116 	/*
1117 	 * Load RX queues with blank mbufs and update the next2fill index for the
1118 	 * device, then update the RxMode of the device.
1119 	 */
1120 	ret = vmxnet3_dev_rxtx_init(dev);
1121 	if (ret != VMXNET3_SUCCESS) {
1122 		PMD_INIT_LOG(ERR, "Device queue init: UNSUCCESSFUL");
1123 		return ret;
1124 	}
1125 
1126 	vmxnet3_init_bufsize(hw);
1127 
1128 	hw->adapter_stopped = FALSE;
1129 
1130 	/* Set the proper Rx mode and issue the Rx mode update command */
1131 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
1132 
1133 	/* Setup interrupt callback  */
1134 	rte_intr_callback_register(dev->intr_handle,
1135 				   vmxnet3_interrupt_handler, dev);
1136 
1137 	if (rte_intr_enable(dev->intr_handle) < 0) {
1138 		PMD_INIT_LOG(ERR, "interrupt enable failed");
1139 		return -EIO;
1140 	}
1141 
1142 	/* enable all intrs */
1143 	vmxnet3_enable_all_intrs(hw);
1144 
1145 	vmxnet3_process_events(dev);
1146 
1147 	/*
1148 	 * Update link state from device since this won't be
1149 	 * done upon starting with lsc in use. This is done
1150 	 * only after enabling interrupts to avoid any race
1151 	 * where the link state could change without an
1152 	 * interrupt being fired.
1153 	 */
1154 	__vmxnet3_dev_link_update(dev, 0);
1155 
1156 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1157 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
1158 	for (i = 0; i < dev->data->nb_tx_queues; i++)
1159 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
1160 
1161 	return VMXNET3_SUCCESS;
1162 }
1163 
1164 /*
1165  * Stop device: disable rx and tx functions to allow for reconfiguring.
1166  */
1167 static int
1168 vmxnet3_dev_stop(struct rte_eth_dev *dev)
1169 {
1170 	struct rte_eth_link link;
1171 	struct vmxnet3_hw *hw = dev->data->dev_private;
1172 	struct rte_intr_handle *intr_handle = dev->intr_handle;
1173 	uint16_t i;
1174 	int ret;
1175 
1176 	PMD_INIT_FUNC_TRACE();
1177 
1178 	if (hw->adapter_stopped == 1) {
1179 		PMD_INIT_LOG(DEBUG, "Device already stopped.");
1180 		return 0;
1181 	}
1182 
1183 	do {
1184 		/* Unregistering takes a lock to make sure there is no running cb.
1185 		 * This has to happen first since vmxnet3_interrupt_handler
1186 		 * re-enables interrupts by calling vmxnet3_enable_intr().
1187 		 */
1188 		ret = rte_intr_callback_unregister(intr_handle,
1189 						   vmxnet3_interrupt_handler,
1190 						   (void *)-1);
1191 	} while (ret == -EAGAIN);
1192 
1193 	if (ret < 0)
1194 		PMD_DRV_LOG(ERR, "Error attempting to unregister intr cb: %d",
1195 			    ret);
1196 
1197 	PMD_INIT_LOG(DEBUG, "Disabled %d intr callbacks", ret);
1198 
1199 	/* disable interrupts */
1200 	vmxnet3_disable_all_intrs(hw);
1201 
1202 	rte_intr_disable(intr_handle);
1203 
1204 	/* Clean datapath event and queue/vector mapping */
1205 	rte_intr_efd_disable(intr_handle);
1206 	rte_intr_vec_list_free(intr_handle);
1207 
1208 	/* quiesce the device first */
1209 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
1210 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
1211 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);
1212 
1213 	/* reset the device */
1214 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
1215 	PMD_INIT_LOG(DEBUG, "Device reset.");
1216 
1217 	vmxnet3_dev_clear_queues(dev);
1218 
1219 	/* Clear recorded link status */
1220 	memset(&link, 0, sizeof(link));
1221 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1222 	link.link_speed = RTE_ETH_SPEED_NUM_10G;
1223 	link.link_autoneg = RTE_ETH_LINK_FIXED;
1224 	rte_eth_linkstatus_set(dev, &link);
1225 
1226 	hw->adapter_stopped = 1;
1227 	dev->data->dev_started = 0;
1228 
1229 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1230 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1231 	for (i = 0; i < dev->data->nb_tx_queues; i++)
1232 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1233 
1234 	return 0;
1235 }
1236 
1237 static void
1238 vmxnet3_free_queues(struct rte_eth_dev *dev)
1239 {
1240 	int i;
1241 
1242 	PMD_INIT_FUNC_TRACE();
1243 
1244 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1245 		vmxnet3_dev_rx_queue_release(dev, i);
1246 	dev->data->nb_rx_queues = 0;
1247 
1248 	for (i = 0; i < dev->data->nb_tx_queues; i++)
1249 		vmxnet3_dev_tx_queue_release(dev, i);
1250 	dev->data->nb_tx_queues = 0;
1251 }
1252 
1253 /*
1254  * Reset and stop device.
1255  */
1256 static int
1257 vmxnet3_dev_close(struct rte_eth_dev *dev)
1258 {
1259 	int ret;
1260 	PMD_INIT_FUNC_TRACE();
1261 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1262 		return 0;
1263 
1264 	ret = vmxnet3_dev_stop(dev);
1265 	vmxnet3_free_queues(dev);
1266 
1267 	return ret;
1268 }
1269 
1270 static int
1271 vmxnet3_dev_reset(struct rte_eth_dev *dev)
1272 {
1273 	int ret;
1274 
1275 	ret = eth_vmxnet3_dev_uninit(dev);
1276 	if (ret)
1277 		return ret;
1278 	ret = eth_vmxnet3_dev_init(dev);
1279 	return ret;
1280 }
1281 
1282 static void
1283 vmxnet3_hw_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
1284 			struct UPT1_TxStats *res)
1285 {
1286 #define VMXNET3_UPDATE_TX_STAT(h, i, f, r)		\
1287 		((r)->f = (h)->tqd_start[(i)].stats.f +	\
1288 			(h)->saved_tx_stats[(i)].f)
1289 
1290 	VMXNET3_UPDATE_TX_STAT(hw, q, ucastPktsTxOK, res);
1291 	VMXNET3_UPDATE_TX_STAT(hw, q, mcastPktsTxOK, res);
1292 	VMXNET3_UPDATE_TX_STAT(hw, q, bcastPktsTxOK, res);
1293 	VMXNET3_UPDATE_TX_STAT(hw, q, ucastBytesTxOK, res);
1294 	VMXNET3_UPDATE_TX_STAT(hw, q, mcastBytesTxOK, res);
1295 	VMXNET3_UPDATE_TX_STAT(hw, q, bcastBytesTxOK, res);
1296 	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxError, res);
1297 	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxDiscard, res);
1298 
1299 #undef VMXNET3_UPDATE_TX_STAT
1300 }
1301 
1302 static void
1303 vmxnet3_hw_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
1304 			struct UPT1_RxStats *res)
1305 {
1306 #define VMXNET3_UPDATE_RX_STAT(h, i, f, r)		\
1307 		((r)->f = (h)->rqd_start[(i)].stats.f +	\
1308 			(h)->saved_rx_stats[(i)].f)
1309 
1310 	VMXNET3_UPDATE_RX_STAT(hw, q, ucastPktsRxOK, res);
1311 	VMXNET3_UPDATE_RX_STAT(hw, q, mcastPktsRxOK, res);
1312 	VMXNET3_UPDATE_RX_STAT(hw, q, bcastPktsRxOK, res);
1313 	VMXNET3_UPDATE_RX_STAT(hw, q, ucastBytesRxOK, res);
1314 	VMXNET3_UPDATE_RX_STAT(hw, q, mcastBytesRxOK, res);
1315 	VMXNET3_UPDATE_RX_STAT(hw, q, bcastBytesRxOK, res);
1316 	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxError, res);
1317 	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxOutOfBuf, res);
1318 
1319 #undef VMXNET3_UPDATE_RX_STAT
1320 }
1321 
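/*
 * The two *_stats_get() helpers below report counters relative to the last
 * stats reset: the accumulated hardware counters minus the snapshot taken in
 * vmxnet3_dev_stats_reset().
 */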
1322 static void
1323 vmxnet3_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
1324 					struct UPT1_TxStats *res)
1325 {
1326 		vmxnet3_hw_tx_stats_get(hw, q, res);
1327 
1328 #define VMXNET3_REDUCE_SNAPSHOT_TX_STAT(h, i, f, r)	\
1329 		((r)->f -= (h)->snapshot_tx_stats[(i)].f)
1330 
1331 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastPktsTxOK, res);
1332 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastPktsTxOK, res);
1333 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastPktsTxOK, res);
1334 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastBytesTxOK, res);
1335 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastBytesTxOK, res);
1336 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastBytesTxOK, res);
1337 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxError, res);
1338 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxDiscard, res);
1339 
1340 #undef VMXNET3_REDUCE_SNAPSHOT_TX_STAT
1341 }
1342 
1343 static void
1344 vmxnet3_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
1345 					struct UPT1_RxStats *res)
1346 {
1347 		vmxnet3_hw_rx_stats_get(hw, q, res);
1348 
1349 #define VMXNET3_REDUCE_SNAPSHOT_RX_STAT(h, i, f, r)	\
1350 		((r)->f -= (h)->snapshot_rx_stats[(i)].f)
1351 
1352 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastPktsRxOK, res);
1353 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastPktsRxOK, res);
1354 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastPktsRxOK, res);
1355 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastBytesRxOK, res);
1356 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastBytesRxOK, res);
1357 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastBytesRxOK, res);
1358 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxError, res);
1359 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxOutOfBuf, res);
1360 
1361 #undef VMXNET3_REDUCE_SNAPSHOT_RX_STAT
1362 }
1363 
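/*
 * Snapshot the hardware counters into saved_tx/rx_stats. The device clears
 * its statistics when it is activated, so the saved values are added back in
 * by the stats helpers above to keep the counters monotonic across restarts.
 */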
1364 static void
1365 vmxnet3_hw_stats_save(struct vmxnet3_hw *hw)
1366 {
1367 	unsigned int i;
1368 
1369 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
1370 
1371 	for (i = 0; i < hw->num_tx_queues; i++)
1372 		vmxnet3_hw_tx_stats_get(hw, i, &hw->saved_tx_stats[i]);
1373 	for (i = 0; i < hw->num_rx_queues; i++)
1374 		vmxnet3_hw_rx_stats_get(hw, i, &hw->saved_rx_stats[i]);
1375 }
1376 
1377 static int
1378 vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
1379 			     struct rte_eth_xstat_name *xstats_names,
1380 			     unsigned int n)
1381 {
1382 	unsigned int i, t, count = 0;
1383 	unsigned int nstats =
1384 		dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
1385 		dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
1386 
1387 	if (!xstats_names || n < nstats)
1388 		return nstats;
1389 
1390 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1391 		if (!dev->data->rx_queues[i])
1392 			continue;
1393 
1394 		for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
1395 			snprintf(xstats_names[count].name,
1396 				 sizeof(xstats_names[count].name),
1397 				 "rx_q%u_%s", i,
1398 				 vmxnet3_rxq_stat_strings[t].name);
1399 			count++;
1400 		}
1401 	}
1402 
1403 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1404 		if (!dev->data->tx_queues[i])
1405 			continue;
1406 
1407 		for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
1408 			snprintf(xstats_names[count].name,
1409 				 sizeof(xstats_names[count].name),
1410 				 "tx_q%u_%s", i,
1411 				 vmxnet3_txq_stat_strings[t].name);
1412 			count++;
1413 		}
1414 	}
1415 
1416 	return count;
1417 }
1418 
1419 static int
1420 vmxnet3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1421 		       unsigned int n)
1422 {
1423 	unsigned int i, t, count = 0;
1424 	unsigned int nstats =
1425 		dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
1426 		dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
1427 
1428 	if (n < nstats)
1429 		return nstats;
1430 
1431 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1432 		struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
1433 
1434 		if (rxq == NULL)
1435 			continue;
1436 
1437 		for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
1438 			xstats[count].value = *(uint64_t *)(((char *)&rxq->stats) +
1439 				vmxnet3_rxq_stat_strings[t].offset);
1440 			xstats[count].id = count;
1441 			count++;
1442 		}
1443 	}
1444 
1445 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1446 		struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
1447 
1448 		if (txq == NULL)
1449 			continue;
1450 
1451 		for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
1452 			xstats[count].value = *(uint64_t *)(((char *)&txq->stats) +
1453 				vmxnet3_txq_stat_strings[t].offset);
1454 			xstats[count].id = count;
1455 			count++;
1456 		}
1457 	}
1458 
1459 	return count;
1460 }
1461 
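/*
 * Basic statistics: per-queue packet/byte counters are the sum of the UPT1
 * unicast, multicast and broadcast counters; pktsRxError is reported as
 * ierrors and pktsRxOutOfBuf as imissed.
 */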
1462 static int
1463 vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1464 {
1465 	unsigned int i;
1466 	struct vmxnet3_hw *hw = dev->data->dev_private;
1467 	struct UPT1_TxStats txStats;
1468 	struct UPT1_RxStats rxStats;
1469 
1470 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
1471 
1472 	for (i = 0; i < hw->num_tx_queues; i++) {
1473 		vmxnet3_tx_stats_get(hw, i, &txStats);
1474 
1475 		stats->q_opackets[i] = txStats.ucastPktsTxOK +
1476 			txStats.mcastPktsTxOK +
1477 			txStats.bcastPktsTxOK;
1478 
1479 		stats->q_obytes[i] = txStats.ucastBytesTxOK +
1480 			txStats.mcastBytesTxOK +
1481 			txStats.bcastBytesTxOK;
1482 
1483 		stats->opackets += stats->q_opackets[i];
1484 		stats->obytes += stats->q_obytes[i];
1485 		stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
1486 	}
1487 
1488 	for (i = 0; i < hw->num_rx_queues; i++) {
1489 		vmxnet3_rx_stats_get(hw, i, &rxStats);
1490 
1491 		stats->q_ipackets[i] = rxStats.ucastPktsRxOK +
1492 			rxStats.mcastPktsRxOK +
1493 			rxStats.bcastPktsRxOK;
1494 
1495 		stats->q_ibytes[i] = rxStats.ucastBytesRxOK +
1496 			rxStats.mcastBytesRxOK +
1497 			rxStats.bcastBytesRxOK;
1498 
1499 		stats->ipackets += stats->q_ipackets[i];
1500 		stats->ibytes += stats->q_ibytes[i];
1501 
1502 		stats->q_errors[i] = rxStats.pktsRxError;
1503 		stats->ierrors += rxStats.pktsRxError;
1504 		stats->imissed += rxStats.pktsRxOutOfBuf;
1505 	}
1506 
1507 	return 0;
1508 }
1509 
1510 static int
1511 vmxnet3_dev_stats_reset(struct rte_eth_dev *dev)
1512 {
1513 	unsigned int i;
1514 	struct vmxnet3_hw *hw = dev->data->dev_private;
1515 	struct UPT1_TxStats txStats = {0};
1516 	struct UPT1_RxStats rxStats = {0};
1517 
1518 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
1519 
1520 	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
1521 
1522 	for (i = 0; i < hw->num_tx_queues; i++) {
1523 		vmxnet3_hw_tx_stats_get(hw, i, &txStats);
1524 		memcpy(&hw->snapshot_tx_stats[i], &txStats,
1525 			sizeof(hw->snapshot_tx_stats[0]));
1526 	}
1527 	for (i = 0; i < hw->num_rx_queues; i++) {
1528 		vmxnet3_hw_rx_stats_get(hw, i, &rxStats);
1529 		memcpy(&hw->snapshot_rx_stats[i], &rxStats,
1530 			sizeof(hw->snapshot_rx_stats[0]));
1531 	}
1532 
1533 	return 0;
1534 }
1535 
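/*
 * Report device capabilities. For vmxnet3 v6 and later the maximum queue
 * counts are queried from the device (VMXNET3_CMD_GET_MAX_QUEUES_CONF);
 * otherwise the static VMXNET3_MAX_RX_QUEUES/VMXNET3_MAX_TX_QUEUES limits
 * apply.
 */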
1536 static int
1537 vmxnet3_dev_info_get(struct rte_eth_dev *dev,
1538 		     struct rte_eth_dev_info *dev_info)
1539 {
1540 	struct vmxnet3_hw *hw = dev->data->dev_private;
1541 	int queues = 0;
1542 
1543 	if (VMXNET3_VERSION_GE_6(hw)) {
1544 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1545 				       VMXNET3_CMD_GET_MAX_QUEUES_CONF);
1546 		queues = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
1547 
1548 		if (queues > 0) {
1549 			dev_info->max_rx_queues =
1550 			  RTE_MIN(VMXNET3_EXT_MAX_RX_QUEUES, ((queues >> 8) & 0xff));
1551 			dev_info->max_tx_queues =
1552 			  RTE_MIN(VMXNET3_EXT_MAX_TX_QUEUES, (queues & 0xff));
1553 		} else {
1554 			dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
1555 			dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
1556 		}
1557 	} else {
1558 		dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
1559 		dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
1560 	}
1561 
1562 	dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
1563 	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
1564 	dev_info->min_mtu = VMXNET3_MIN_MTU;
1565 	dev_info->max_mtu = VMXNET3_MAX_MTU;
1566 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
1567 	dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
1568 
1569 	dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
1570 
1571 	if (VMXNET3_VERSION_GE_4(hw)) {
1572 		dev_info->flow_type_rss_offloads |= VMXNET3_V4_RSS_MASK;
1573 	}
1574 
1575 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1576 		.nb_max = VMXNET3_RX_RING_MAX_SIZE,
1577 		.nb_min = VMXNET3_DEF_RX_RING_SIZE,
1578 		.nb_align = 1,
1579 	};
1580 
1581 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1582 		.nb_max = VMXNET3_TX_RING_MAX_SIZE,
1583 		.nb_min = VMXNET3_DEF_TX_RING_SIZE,
1584 		.nb_align = 1,
1585 		.nb_seg_max = VMXNET3_TX_MAX_SEG,
1586 		.nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
1587 	};
1588 
1589 	dev_info->rx_offload_capa = VMXNET3_RX_OFFLOAD_CAP;
1590 	dev_info->rx_queue_offload_capa = 0;
1591 	dev_info->tx_offload_capa = VMXNET3_TX_OFFLOAD_CAP;
1592 	dev_info->tx_queue_offload_capa = 0;
1593 	if (hw->rss_conf == NULL) {
1594 		/* RSS not configured */
1595 		dev_info->reta_size = 0;
1596 	} else {
1597 		dev_info->reta_size = hw->rss_conf->indTableSize;
1598 	}
1599 	return 0;
1600 }
1601 
1602 static int
1603 vmxnet3_hw_ver_get(struct rte_eth_dev *dev,
1604 		   char *fw_version, size_t fw_size)
1605 {
1606 	int ret;
1607 	struct vmxnet3_hw *hw = dev->data->dev_private;
1608 
1609 	ret = snprintf(fw_version, fw_size, "v%d", hw->version);
1610 
1611 	ret += 1; /* add the size of '\0' */
1612 	if (fw_size < (uint32_t)ret)
1613 		return ret;
1614 	else
1615 		return 0;
1616 }
1617 
1618 static const uint32_t *
1619 vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1620 {
1621 	static const uint32_t ptypes[] = {
1622 		RTE_PTYPE_L3_IPV4_EXT,
1623 		RTE_PTYPE_L3_IPV4,
1624 		RTE_PTYPE_UNKNOWN
1625 	};
1626 
1627 	if (dev->rx_pkt_burst == vmxnet3_recv_pkts)
1628 		return ptypes;
1629 	return NULL;
1630 }
1631 
1632 static int
1633 vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
1634 {
1635 	struct vmxnet3_hw *hw = dev->data->dev_private;
1636 
1637 	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)(hw->perm_addr));
1638 	vmxnet3_write_mac(hw, mac_addr->addr_bytes);
1639 	return 0;
1640 }
1641 
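/*
 * Set a new MTU. The valid range depends on the device version (up to
 * VMXNET3_V6_MAX_MTU for v6 and later, VMXNET3_MAX_MTU otherwise). If the
 * port is already started, the device is stopped and restarted so the new
 * MTU is passed on to the backend.
 */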
1642 static int
1643 vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1644 {
1645 	struct vmxnet3_hw *hw = dev->data->dev_private;
1646 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
1647 
1648 	if (mtu < VMXNET3_MIN_MTU)
1649 		return -EINVAL;
1650 
1651 	if (VMXNET3_VERSION_GE_6(hw)) {
1652 		if (mtu > VMXNET3_V6_MAX_MTU)
1653 			return -EINVAL;
1654 	} else {
1655 		if (mtu > VMXNET3_MAX_MTU) {
1656 			PMD_DRV_LOG(ERR, "MTU %d too large in device version v%d",
1657 				    mtu, hw->version);
1658 			return -EINVAL;
1659 		}
1660 	}
1661 
1662 	dev->data->mtu = mtu;
1663 	/* update max frame size */
1664 	dev->data->dev_conf.rxmode.mtu = frame_size;
1665 
1666 	if (dev->data->dev_started == 0)
1667 		return 0;
1668 
1669 	/* changing mtu for vmxnet3 pmd does not require a restart
1670 	 * as it does not need to repopulate the rx rings to support
1671 	 * different mtu size.  We stop and restart the device here
1672 	 * just to pass the mtu info to the backend.
1673 	 */
1674 	vmxnet3_dev_stop(dev);
1675 	vmxnet3_dev_start(dev);
1676 
1677 	return 0;
1678 }
1679 
1680 /* return 0 means link status changed, -1 means not changed */
1681 static int
1682 __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
1683 			  __rte_unused int wait_to_complete)
1684 {
1685 	struct vmxnet3_hw *hw = dev->data->dev_private;
1686 	struct rte_eth_link link;
1687 	uint32_t ret;
1688 
1689 	memset(&link, 0, sizeof(link));
1690 
1691 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
1692 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
1693 
1694 	if (ret & 0x1)
1695 		link.link_status = RTE_ETH_LINK_UP;
1696 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1697 	link.link_speed = RTE_ETH_SPEED_NUM_10G;
1698 	link.link_autoneg = RTE_ETH_LINK_FIXED;
1699 
1700 	return rte_eth_linkstatus_set(dev, &link);
1701 }
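
/*
 * Note: VMXNET3_CMD_GET_LINK reports the link state in bit 0 of the command
 * register read back above; the PMD always advertises a fixed 10G full-duplex
 * link. Applications typically read the cached status, e.g. (with "port_id"
 * assumed):
 *
 *	struct rte_eth_link link;
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
 *	    link.link_status == RTE_ETH_LINK_UP)
 *		printf("link up at %u Mbps\n", link.link_speed);
 */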
1702 
1703 static int
1704 vmxnet3_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1705 {
1706 	/* Link status doesn't change for stopped dev */
1707 	if (dev->data->dev_started == 0)
1708 		return -1;
1709 
1710 	return __vmxnet3_dev_link_update(dev, wait_to_complete);
1711 }
1712 
1713 /* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
1714 static void
1715 vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
1716 {
1717 	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
1718 
1719 	if (set)
1720 		rxConf->rxMode = rxConf->rxMode | feature;
1721 	else
1722 		rxConf->rxMode = rxConf->rxMode & (~feature);
1723 
1724 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
1725 }
1726 
1727 /* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
1728 static int
1729 vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
1730 {
1731 	struct vmxnet3_hw *hw = dev->data->dev_private;
1732 	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
1733 
1734 	memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
1735 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
1736 
1737 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1738 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1739 
1740 	return 0;
1741 }
1742 
1743 /* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
1744 static int
1745 vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
1746 {
1747 	struct vmxnet3_hw *hw = dev->data->dev_private;
1748 	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
1749 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
1750 
1751 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1752 		memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
1753 	else
1754 		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
1755 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
1756 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1757 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1758 
1759 	return 0;
1760 }
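
/*
 * Illustrative usage (not part of this driver): promiscuous mode is toggled
 * through the generic ethdev API. The handlers above also clear or restore the
 * VLAN filter table, so VLAN filtering resumes from the shadow table once
 * promiscuous mode is left. "port_id" is assumed.
 *
 *	rte_eth_promiscuous_enable(port_id);
 *	... capture traffic ...
 *	rte_eth_promiscuous_disable(port_id);
 */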
1761 
1762 /* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
1763 static int
1764 vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
1765 {
1766 	struct vmxnet3_hw *hw = dev->data->dev_private;
1767 
1768 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
1769 
1770 	return 0;
1771 }
1772 
1773 /* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
1774 static int
1775 vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
1776 {
1777 	struct vmxnet3_hw *hw = dev->data->dev_private;
1778 
1779 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
1780 
1781 	return 0;
1782 }
1783 
1784 /* Enable/disable filter on vlan */
1785 static int
1786 vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
1787 {
1788 	struct vmxnet3_hw *hw = dev->data->dev_private;
1789 	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
1790 	uint32_t *vf_table = rxConf->vfTable;
1791 
1792 	/* save state for restore */
1793 	if (on)
1794 		VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
1795 	else
1796 		VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);
1797 
1798 	/* don't change active filter if in promiscuous mode */
1799 	if (rxConf->rxMode & VMXNET3_RXM_PROMISC)
1800 		return 0;
1801 
1802 	/* set in hardware */
1803 	if (on)
1804 		VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
1805 	else
1806 		VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);
1807 
1808 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1809 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1810 	return 0;
1811 }
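
/*
 * Illustrative usage (not part of this driver): per-VLAN filtering is driven
 * through rte_eth_dev_vlan_filter(), which typically requires
 * RTE_ETH_RX_OFFLOAD_VLAN_FILTER to be enabled on the port. The shadow table
 * kept above lets the configuration survive a round trip through promiscuous
 * mode. "port_id" is assumed.
 *
 *	if (rte_eth_dev_vlan_filter(port_id, 100, 1) != 0)
 *		printf("failed to admit VLAN 100\n");
 */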
1812 
1813 static int
1814 vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1815 {
1816 	struct vmxnet3_hw *hw = dev->data->dev_private;
1817 	Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
1818 	uint32_t *vf_table = devRead->rxFilterConf.vfTable;
1819 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
1820 
1821 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1822 		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1823 			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
1824 		else
1825 			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
1826 
1827 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1828 				       VMXNET3_CMD_UPDATE_FEATURE);
1829 	}
1830 
1831 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
1832 		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1833 			memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
1834 		else
1835 			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
1836 
1837 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1838 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1839 	}
1840 
1841 	return 0;
1842 }
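
/*
 * A minimal configuration sketch (assumed application code, not part of this
 * driver): the mask handled above is derived from the Rx offload flags
 * requested at configure time, so enabling stripping and filtering usually
 * amounts to:
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
 *				RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * Runtime changes can also be requested with rte_eth_dev_set_vlan_offload().
 * "port_id", "nb_rxq" and "nb_txq" are assumed application variables.
 */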
1843 
1844 static void
1845 vmxnet3_process_events(struct rte_eth_dev *dev)
1846 {
1847 	struct vmxnet3_hw *hw = dev->data->dev_private;
1848 	uint32_t events = hw->shared->ecr;
1849 	int i;
1850 
1851 	if (!events)
1852 		return;
1853 
1854 	/*
1855 	 * Writing a 1 to an ECR bit clears it. Write the events back to
1856 	 * ECR so that the bits which were set are cleared.
1857 	 */
1858 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);
1859 
1860 	/* Check if link state has changed */
1861 	if (events & VMXNET3_ECR_LINK) {
1862 		PMD_DRV_LOG(DEBUG, "Process events: VMXNET3_ECR_LINK event");
1863 		if (vmxnet3_dev_link_update(dev, 0) == 0)
1864 			rte_eth_dev_callback_process(dev,
1865 						     RTE_ETH_EVENT_INTR_LSC,
1866 						     NULL);
1867 	}
1868 
1869 	/* Check if there is an error on xmit/recv queues */
1870 	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
1871 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1872 				       VMXNET3_CMD_GET_QUEUE_STATUS);
1873 
1874 		PMD_DRV_LOG(ERR, "queue error event 0x%x for "
1875 			    RTE_ETHER_ADDR_PRT_FMT, events,
1876 			    hw->perm_addr[0], hw->perm_addr[1],
1877 			    hw->perm_addr[2], hw->perm_addr[3],
1878 			    hw->perm_addr[4], hw->perm_addr[5]);
1879 
1880 		for (i = 0; i < hw->num_tx_queues; i++) {
1881 			if (hw->tqd_start[i].status.stopped)
1882 				PMD_DRV_LOG(ERR, "tq %d error 0x%x",
1883 					    i, hw->tqd_start[i].status.error);
1884 		}
1885 		for (i = 0; i < hw->num_rx_queues; i++) {
1886 			if (hw->rqd_start[i].status.stopped)
1887 				PMD_DRV_LOG(ERR, "rq %d error 0x%x",
1888 					    i, hw->rqd_start[i].status.error);
1889 		}
1890 
1891 		/* The device has to be reset; notify the application
1892 		 * so that it can reset the device. */
1893 		rte_eth_dev_callback_process(dev,
1894 					     RTE_ETH_EVENT_INTR_RESET,
1895 					     NULL);
1896 	}
1897 
1898 	if (events & VMXNET3_ECR_DIC)
1899 		PMD_DRV_LOG(DEBUG, "Device implementation change event.");
1900 
1901 	if (events & VMXNET3_ECR_DEBUG)
1902 		PMD_DRV_LOG(DEBUG, "Debug event generated by device.");
1903 }
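
/*
 * Illustrative application-side sketch (not part of this driver): the link and
 * queue-error events surfaced above are delivered through the ethdev callback
 * mechanism, so interested applications register for them once during init:
 *
 *	static int
 *	event_cb(uint16_t port_id, enum rte_eth_event_type type,
 *		 void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, (int)type);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      event_cb, NULL);
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
 *				      event_cb, NULL);
 */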
1904 
1905 static void
1906 vmxnet3_interrupt_handler(void *param)
1907 {
1908 	struct rte_eth_dev *dev = param;
1909 	struct vmxnet3_hw *hw = dev->data->dev_private;
1910 	uint32_t events;
1911 	uint8 *eventIntrIdx;
1912 	uint32 *intrCtrl;
1913 
1914 	PMD_INIT_FUNC_TRACE();
1915 
1916 	vmxnet3_get_intr_ctrl_ev(hw, &eventIntrIdx, &intrCtrl);
1917 	vmxnet3_disable_intr(hw, *eventIntrIdx);
1918 
1919 	events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
1920 	if (events == 0)
1921 		goto done;
1922 
1923 	PMD_DRV_LOG(DEBUG, "Reading events: 0x%X", events);
1924 	vmxnet3_process_events(dev);
1925 done:
1926 	vmxnet3_enable_intr(hw, *eventIntrIdx);
1927 }
1928 
1929 static int
1930 vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1931 {
1932 	struct vmxnet3_hw *hw = dev->data->dev_private;
1933 
1934 	vmxnet3_enable_intr(hw,
1935 			    rte_intr_vec_list_index_get(dev->intr_handle,
1936 							       queue_id));
1937 
1938 	return 0;
1939 }
1940 
1941 static int
1942 vmxnet3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1943 {
1944 	struct vmxnet3_hw *hw = dev->data->dev_private;
1945 
1946 	vmxnet3_disable_intr(hw,
1947 		rte_intr_vec_list_index_get(dev->intr_handle, queue_id));
1948 
1949 	return 0;
1950 }
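
/*
 * Illustrative usage (not part of this driver): per-queue Rx interrupts are
 * available only when the port was configured with intr_conf.rxq = 1; the
 * application then arms and disarms them around its wait/poll loop.
 * "port_id" and "queue_id" are assumed.
 *
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	... block on the event fd, e.g. via rte_epoll_wait() ...
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *	... drain the queue with rte_eth_rx_burst() ...
 */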
1951 
1952 RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
1953 RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
1954 RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");
1955 RTE_LOG_REGISTER_SUFFIX(vmxnet3_logtype_init, init, NOTICE);
1956 RTE_LOG_REGISTER_SUFFIX(vmxnet3_logtype_driver, driver, NOTICE);
1957 
1958 static int
1959 vmxnet3_rss_reta_update(struct rte_eth_dev *dev,
1960 			struct rte_eth_rss_reta_entry64 *reta_conf,
1961 			uint16_t reta_size)
1962 {
1963 	int i, idx, shift;
1964 	struct vmxnet3_hw *hw = dev->data->dev_private;
1965 	struct VMXNET3_RSSConf *dev_rss_conf = hw->rss_conf;
1966 
1967 	if (reta_size != dev_rss_conf->indTableSize) {
1968 		PMD_DRV_LOG(ERR,
1969 			"The size of the configured hash lookup table (%d) does not "
1970 			"match the supported size (%d)",
1971 			reta_size, dev_rss_conf->indTableSize);
1972 		return -EINVAL;
1973 	}
1974 
1975 	for (i = 0; i < reta_size; i++) {
1976 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1977 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1978 		if (reta_conf[idx].mask & RTE_BIT64(shift))
1979 			dev_rss_conf->indTable[i] = (uint8_t)reta_conf[idx].reta[shift];
1980 	}
1981 
1982 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1983 				VMXNET3_CMD_UPDATE_RSSIDT);
1984 
1985 	return 0;
1986 }
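
/*
 * A minimal RETA update sketch (assumed application code, not part of this
 * driver): the table must hold exactly indTableSize entries, reported to the
 * application as dev_info.reta_size, grouped in chunks of
 * RTE_ETH_RETA_GROUP_SIZE. "port_id", "reta_size" and "nb_rxq" are assumed to
 * have been obtained beforehand.
 *
 *	struct rte_eth_rss_reta_entry64 reta[RTE_ETH_RSS_RETA_SIZE_512 /
 *					     RTE_ETH_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < reta_size; i++) {
 *		reta[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
 *			RTE_BIT64(i % RTE_ETH_RETA_GROUP_SIZE);
 *		reta[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
 *			i % nb_rxq;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
 *
 * The active table can be read back with rte_eth_dev_rss_reta_query().
 */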
1987 
1988 static int
1989 vmxnet3_rss_reta_query(struct rte_eth_dev *dev,
1990 		       struct rte_eth_rss_reta_entry64 *reta_conf,
1991 		       uint16_t reta_size)
1992 {
1993 	int i, idx, shift;
1994 	struct vmxnet3_hw *hw = dev->data->dev_private;
1995 	struct VMXNET3_RSSConf *dev_rss_conf = hw->rss_conf;
1996 
1997 	if (reta_size != dev_rss_conf->indTableSize) {
1998 		PMD_DRV_LOG(ERR,
1999 			"Size of requested hash lookup table (%d) doesn't "
2000 			"match the configured size (%d)",
2001 			reta_size, dev_rss_conf->indTableSize);
2002 		return -EINVAL;
2003 	}
2004 
2005 	for (i = 0; i < reta_size; i++) {
2006 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2007 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2008 		if (reta_conf[idx].mask & RTE_BIT64(shift))
2009 			reta_conf[idx].reta[shift] = dev_rss_conf->indTable[i];
2010 	}
2011 
2012 	return 0;
2013 }
2014