xref: /dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c (revision da7e701151ea8b742d4c38ace3e4fefd1b4507fc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2015 Intel Corporation
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <fcntl.h>
13 #include <inttypes.h>
14 #include <rte_byteorder.h>
15 #include <rte_common.h>
16 #include <rte_cycles.h>
17 
18 #include <rte_interrupts.h>
19 #include <rte_log.h>
20 #include <rte_debug.h>
21 #include <rte_pci.h>
22 #include <bus_pci_driver.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_memory.h>
25 #include <rte_memzone.h>
26 #include <rte_eal.h>
27 #include <rte_alarm.h>
28 #include <rte_ether.h>
29 #include <ethdev_driver.h>
30 #include <ethdev_pci.h>
31 #include <rte_string_fns.h>
32 #include <rte_malloc.h>
33 #include <dev_driver.h>
34 
35 #include "base/vmxnet3_defs.h"
36 
37 #include "vmxnet3_ring.h"
38 #include "vmxnet3_logs.h"
39 #include "vmxnet3_ethdev.h"
40 
41 #define	VMXNET3_TX_MAX_SEG	UINT8_MAX
42 
43 #define VMXNET3_TX_OFFLOAD_CAP		\
44 	(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |	\
45 	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |	\
46 	 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |	\
47 	 RTE_ETH_TX_OFFLOAD_TCP_TSO |	\
48 	 RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
49 
50 #define VMXNET3_RX_OFFLOAD_CAP		\
51 	(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |	\
52 	 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |   \
53 	 RTE_ETH_RX_OFFLOAD_SCATTER |	\
54 	 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |	\
55 	 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |	\
56 	 RTE_ETH_RX_OFFLOAD_TCP_LRO |	\
57 	 RTE_ETH_RX_OFFLOAD_RSS_HASH)
58 
59 int vmxnet3_segs_dynfield_offset = -1;
60 
61 static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
62 static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
63 static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
64 static int vmxnet3_dev_start(struct rte_eth_dev *dev);
65 static int vmxnet3_dev_stop(struct rte_eth_dev *dev);
66 static int vmxnet3_dev_close(struct rte_eth_dev *dev);
67 static int vmxnet3_dev_reset(struct rte_eth_dev *dev);
68 static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
69 static int vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
70 static int vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
71 static int vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
72 static int vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
73 static int __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
74 				     int wait_to_complete);
75 static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
76 				   int wait_to_complete);
77 static void vmxnet3_hw_stats_save(struct vmxnet3_hw *hw);
78 static int vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
79 				  struct rte_eth_stats *stats);
80 static int vmxnet3_dev_stats_reset(struct rte_eth_dev *dev);
81 static int vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
82 					struct rte_eth_xstat_name *xstats,
83 					unsigned int n);
84 static int vmxnet3_dev_xstats_get(struct rte_eth_dev *dev,
85 				  struct rte_eth_xstat *xstats, unsigned int n);
86 static int vmxnet3_dev_info_get(struct rte_eth_dev *dev,
87 				struct rte_eth_dev_info *dev_info);
88 static int vmxnet3_hw_ver_get(struct rte_eth_dev *dev,
89 			      char *fw_version, size_t fw_size);
90 static const uint32_t *
91 vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
92 static int vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
93 static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
94 				       uint16_t vid, int on);
95 static int vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
96 static int vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
97 				 struct rte_ether_addr *mac_addr);
98 static void vmxnet3_process_events(struct rte_eth_dev *dev);
99 static void vmxnet3_interrupt_handler(void *param);
100 static int
101 vmxnet3_rss_reta_update(struct rte_eth_dev *dev,
102 			struct rte_eth_rss_reta_entry64 *reta_conf,
103 			uint16_t reta_size);
104 static int
105 vmxnet3_rss_reta_query(struct rte_eth_dev *dev,
106 		       struct rte_eth_rss_reta_entry64 *reta_conf,
107 		       uint16_t reta_size);
108 
109 static int vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
110 						uint16_t queue_id);
111 static int vmxnet3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
112 						uint16_t queue_id);
113 
114 /*
115  * The set of PCI devices this driver supports
116  */
117 #define VMWARE_PCI_VENDOR_ID 0x15AD
118 #define VMWARE_DEV_ID_VMXNET3 0x07B0
119 static const struct rte_pci_id pci_id_vmxnet3_map[] = {
120 	{ RTE_PCI_DEVICE(VMWARE_PCI_VENDOR_ID, VMWARE_DEV_ID_VMXNET3) },
121 	{ .vendor_id = 0, /* sentinel */ },
122 };
123 
124 static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
125 	.dev_configure        = vmxnet3_dev_configure,
126 	.dev_start            = vmxnet3_dev_start,
127 	.dev_stop             = vmxnet3_dev_stop,
128 	.dev_close            = vmxnet3_dev_close,
129 	.dev_reset            = vmxnet3_dev_reset,
130 	.link_update          = vmxnet3_dev_link_update,
131 	.promiscuous_enable   = vmxnet3_dev_promiscuous_enable,
132 	.promiscuous_disable  = vmxnet3_dev_promiscuous_disable,
133 	.allmulticast_enable  = vmxnet3_dev_allmulticast_enable,
134 	.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
135 	.mac_addr_set         = vmxnet3_mac_addr_set,
136 	.mtu_set              = vmxnet3_dev_mtu_set,
137 	.stats_get            = vmxnet3_dev_stats_get,
138 	.stats_reset          = vmxnet3_dev_stats_reset,
139 	.xstats_get           = vmxnet3_dev_xstats_get,
140 	.xstats_get_names     = vmxnet3_dev_xstats_get_names,
141 	.dev_infos_get        = vmxnet3_dev_info_get,
142 	.fw_version_get       = vmxnet3_hw_ver_get,
143 	.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
144 	.vlan_filter_set      = vmxnet3_dev_vlan_filter_set,
145 	.vlan_offload_set     = vmxnet3_dev_vlan_offload_set,
146 	.rx_queue_setup       = vmxnet3_dev_rx_queue_setup,
147 	.rx_queue_release     = vmxnet3_dev_rx_queue_release,
148 	.rx_queue_intr_enable = vmxnet3_dev_rx_queue_intr_enable,
149 	.rx_queue_intr_disable = vmxnet3_dev_rx_queue_intr_disable,
150 	.tx_queue_setup       = vmxnet3_dev_tx_queue_setup,
151 	.tx_queue_release     = vmxnet3_dev_tx_queue_release,
152 	.reta_update          = vmxnet3_rss_reta_update,
153 	.reta_query           = vmxnet3_rss_reta_query,
154 };
155 
156 struct vmxnet3_xstats_name_off {
157 	char name[RTE_ETH_XSTATS_NAME_SIZE];
158 	unsigned int offset;
159 };
160 
161 /* tx_qX_ is prepended to the name string here */
162 static const struct vmxnet3_xstats_name_off vmxnet3_txq_stat_strings[] = {
163 	{"drop_total",         offsetof(struct vmxnet3_txq_stats, drop_total)},
164 	{"drop_too_many_segs", offsetof(struct vmxnet3_txq_stats, drop_too_many_segs)},
165 	{"drop_tso",           offsetof(struct vmxnet3_txq_stats, drop_tso)},
166 	{"tx_ring_full",       offsetof(struct vmxnet3_txq_stats, tx_ring_full)},
167 };
168 
169 /* rx_qX_ is prepended to the name string here */
170 static const struct vmxnet3_xstats_name_off vmxnet3_rxq_stat_strings[] = {
171 	{"drop_total",           offsetof(struct vmxnet3_rxq_stats, drop_total)},
172 	{"drop_err",             offsetof(struct vmxnet3_rxq_stats, drop_err)},
173 	{"drop_fcs",             offsetof(struct vmxnet3_rxq_stats, drop_fcs)},
174 	{"rx_buf_alloc_failure", offsetof(struct vmxnet3_rxq_stats, rx_buf_alloc_failure)},
175 };
176 
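/*
 * Reserve (or look up) an IOVA-contiguous memzone named "eth_p<port>_<post_string>".
 * When reuse is false, any existing zone with that name is freed and a fresh
 * one is reserved so its size can change between configurations.
 */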
177 static const struct rte_memzone *
178 gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
179 		 const char *post_string, int socket_id,
180 		 uint16_t align, bool reuse)
181 {
182 	char z_name[RTE_MEMZONE_NAMESIZE];
183 	const struct rte_memzone *mz;
184 
185 	snprintf(z_name, sizeof(z_name), "eth_p%d_%s",
186 			dev->data->port_id, post_string);
187 
188 	mz = rte_memzone_lookup(z_name);
189 	if (!reuse) {
190 		if (mz)
191 			rte_memzone_free(mz);
192 		return rte_memzone_reserve_aligned(z_name, size, socket_id,
193 				RTE_MEMZONE_IOVA_CONTIG, align);
194 	}
195 
196 	if (mz)
197 		return mz;
198 
199 	return rte_memzone_reserve_aligned(z_name, size, socket_id,
200 			RTE_MEMZONE_IOVA_CONTIG, align);
201 }
202 
203 /*
204  * Enable the given interrupt
205  */
206 static void
207 vmxnet3_enable_intr(struct vmxnet3_hw *hw, unsigned int intr_idx)
208 {
209 	PMD_INIT_FUNC_TRACE();
210 	VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + intr_idx * 8, 0);
211 }
212 
213 /*
214  * Disable the given interrupt
215  */
216 static void
217 vmxnet3_disable_intr(struct vmxnet3_hw *hw, unsigned int intr_idx)
218 {
219 	PMD_INIT_FUNC_TRACE();
220 	VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + intr_idx * 8, 1);
221 }
222 
223 /*
224  * Simple helper to get intrCtrl and eventIntrIdx based on config and hw version
225  */
226 static void
227 vmxnet3_get_intr_ctrl_ev(struct vmxnet3_hw *hw,
228 			 uint8 **out_eventIntrIdx,
229 			 uint32 **out_intrCtrl)
230 {
231 
232 	if (VMXNET3_VERSION_GE_6(hw) && hw->queuesExtEnabled) {
233 		*out_eventIntrIdx = &hw->shared->devReadExt.intrConfExt.eventIntrIdx;
234 		*out_intrCtrl = &hw->shared->devReadExt.intrConfExt.intrCtrl;
235 	} else {
236 		*out_eventIntrIdx = &hw->shared->devRead.intrConf.eventIntrIdx;
237 		*out_intrCtrl = &hw->shared->devRead.intrConf.intrCtrl;
238 	}
239 }
240 
241 /*
242  * Disable all intrs used by the device
243  */
244 static void
245 vmxnet3_disable_all_intrs(struct vmxnet3_hw *hw)
246 {
247 	int i;
248 	uint8 *eventIntrIdx;
249 	uint32 *intrCtrl;
250 
251 	PMD_INIT_FUNC_TRACE();
252 	vmxnet3_get_intr_ctrl_ev(hw, &eventIntrIdx, &intrCtrl);
253 
254 	*intrCtrl |= rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
255 
256 	for (i = 0; i < hw->intr.num_intrs; i++)
257 		vmxnet3_disable_intr(hw, i);
258 }
259 
260 /*
261  * Enable all intrs used by the device
262  */
263 static void
264 vmxnet3_enable_all_intrs(struct vmxnet3_hw *hw)
265 {
266 	uint8 *eventIntrIdx;
267 	uint32 *intrCtrl;
268 
269 	PMD_INIT_FUNC_TRACE();
270 	vmxnet3_get_intr_ctrl_ev(hw, &eventIntrIdx, &intrCtrl);
271 
272 	*intrCtrl &= rte_cpu_to_le_32(~VMXNET3_IC_DISABLE_ALL);
273 
274 	if (hw->intr.lsc_only) {
275 		vmxnet3_enable_intr(hw, *eventIntrIdx);
276 	} else {
277 		int i;
278 
279 		for (i = 0; i < hw->intr.num_intrs; i++)
280 			vmxnet3_enable_intr(hw, i);
281 	}
282 }
283 
284 /*
285  * Gets tx data ring descriptor size.
286  */
287 static uint16_t
288 eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
289 {
290 	uint16 txdata_desc_size;
291 
292 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
293 			       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
294 	txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
295 
296 	return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
297 		txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
298 		txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK) ?
299 		sizeof(struct Vmxnet3_TxDataDesc) : txdata_desc_size;
300 }
301 
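/*
 * Negotiate device capabilities for vmxnet3 v7+: read the DCR and PTCR
 * capability registers, prefer the UPTv2 (passthrough) capabilities when they
 * are usable, otherwise fall back to the DCR capabilities with the large BAR
 * disabled, and write the selected set back to the device.
 */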
302 static int
303 eth_vmxnet3_setup_capabilities(struct vmxnet3_hw *hw,
304 			       struct rte_eth_dev *eth_dev)
305 {
306 	uint32_t dcr, ptcr, value;
307 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
308 
309 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
310 			       VMXNET3_CMD_GET_MAX_CAPABILITIES);
311 	value = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
312 	hw->max_capabilities[0] = value;
313 	dcr = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_DCR);
314 	hw->DCR_capabilities[0] = dcr;
315 	hw->used_DCR_capabilities[0] = 0;
316 	ptcr = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_PTCR);
317 	hw->PTCR_capabilities[0] = ptcr;
318 	hw->used_PTCR_capabilities[0] = 0;
319 
320 	if (hw->uptv2_enabled && !(ptcr & (1 << VMXNET3_DCR_ERROR))) {
321 		PMD_DRV_LOG(NOTICE, "UPTv2 enabled");
322 		hw->used_PTCR_capabilities[0] = ptcr;
323 	} else {
324 		/* Use all DCR capabilities, but disable large bar */
325 		hw->used_DCR_capabilities[0] = dcr &
326 					(~(1UL << VMXNET3_CAP_LARGE_BAR));
327 		PMD_DRV_LOG(NOTICE, "UPTv2 disabled");
328 	}
329 	if (hw->DCR_capabilities[0] & (1UL << VMXNET3_CAP_OOORX_COMP) &&
330 	    hw->PTCR_capabilities[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) {
331 		if (hw->uptv2_enabled) {
332 			hw->used_PTCR_capabilities[0] |=
333 				(1UL << VMXNET3_CAP_OOORX_COMP);
334 		}
335 	}
336 	if (hw->used_PTCR_capabilities[0]) {
337 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DCR,
338 				       hw->used_PTCR_capabilities[0]);
339 	} else if (hw->used_DCR_capabilities[0]) {
340 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DCR,
341 				       hw->used_DCR_capabilities[0]);
342 	}
343 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
344 	dcr = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
345 	hw->used_DCR_capabilities[0] = dcr;
346 	PMD_DRV_LOG(DEBUG, "Dev " PCI_PRI_FMT ", vmxnet3 v%d, UPT enabled: %s, "
347 		    "DCR0=0x%08x, used DCR=0x%08x, "
348 		    "PTCR=0x%08x, used PTCR=0x%08x",
349 		    pci_dev->addr.domain, pci_dev->addr.bus,
350 		    pci_dev->addr.devid, pci_dev->addr.function, hw->version,
351 		    hw->uptv2_enabled ? "true" : "false",
352 		    hw->DCR_capabilities[0], hw->used_DCR_capabilities[0],
353 		    hw->PTCR_capabilities[0], hw->used_PTCR_capabilities[0]);
354 	return 0;
355 }
356 
357 /*
358  * Initialize the device. It returns 0 on success.
359  */
360 static int
361 eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
362 {
363 	struct rte_pci_device *pci_dev;
364 	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
365 	uint32_t mac_hi, mac_lo, ver;
366 	struct rte_eth_link link;
367 	static const struct rte_mbuf_dynfield vmxnet3_segs_dynfield_desc = {
368 		.name = VMXNET3_SEGS_DYNFIELD_NAME,
369 		.size = sizeof(vmxnet3_segs_dynfield_t),
370 		.align = __alignof__(vmxnet3_segs_dynfield_t),
371 	};
372 
373 	PMD_INIT_FUNC_TRACE();
374 
375 	eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
376 	eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
377 	eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
378 	eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
379 	eth_dev->rx_queue_count = vmxnet3_dev_rx_queue_count;
380 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
381 
382 	/* extra mbuf field is required to guess MSS */
383 	vmxnet3_segs_dynfield_offset =
384 		rte_mbuf_dynfield_register(&vmxnet3_segs_dynfield_desc);
385 	if (vmxnet3_segs_dynfield_offset < 0) {
386 		PMD_INIT_LOG(ERR, "Cannot register mbuf field.");
387 		return -rte_errno;
388 	}
389 
390 	/*
391 	 * for secondary processes, we don't initialize any further as primary
392 	 * has already done this work.
393 	 */
394 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
395 		return 0;
396 
397 	rte_eth_copy_pci_info(eth_dev, pci_dev);
398 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
399 
400 	/* Vendor and Device ID need to be set before init of shared code */
401 	hw->device_id = pci_dev->id.device_id;
402 	hw->vendor_id = pci_dev->id.vendor_id;
403 	hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
404 	hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;
405 
406 	hw->num_rx_queues = 1;
407 	hw->num_tx_queues = 1;
408 	hw->bufs_per_pkt = 1;
409 
410 	/* Check h/w version compatibility with driver. */
411 	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
412 
413 	if (ver & (1 << VMXNET3_REV_7)) {
414 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
415 				       1 << VMXNET3_REV_7);
416 		hw->version = VMXNET3_REV_7 + 1;
417 	} else if (ver & (1 << VMXNET3_REV_6)) {
418 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
419 				       1 << VMXNET3_REV_6);
420 		hw->version = VMXNET3_REV_6 + 1;
421 	} else if (ver & (1 << VMXNET3_REV_5)) {
422 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
423 				       1 << VMXNET3_REV_5);
424 		hw->version = VMXNET3_REV_5 + 1;
425 	} else if (ver & (1 << VMXNET3_REV_4)) {
426 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
427 				       1 << VMXNET3_REV_4);
428 		hw->version = VMXNET3_REV_4 + 1;
429 	} else if (ver & (1 << VMXNET3_REV_3)) {
430 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
431 				       1 << VMXNET3_REV_3);
432 		hw->version = VMXNET3_REV_3 + 1;
433 	} else if (ver & (1 << VMXNET3_REV_2)) {
434 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
435 				       1 << VMXNET3_REV_2);
436 		hw->version = VMXNET3_REV_2 + 1;
437 	} else if (ver & (1 << VMXNET3_REV_1)) {
438 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
439 				       1 << VMXNET3_REV_1);
440 		hw->version = VMXNET3_REV_1 + 1;
441 	} else {
442 		PMD_INIT_LOG(ERR, "Incompatible hardware version: %d", ver);
443 		return -EIO;
444 	}
445 
446 	PMD_INIT_LOG(INFO, "Using device v%d", hw->version);
447 
448 	/* Check UPT version compatibility with driver. */
449 	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
450 	PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
451 	if (ver & 0x1)
452 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
453 	else {
454 		PMD_INIT_LOG(ERR, "Incompatible UPT version.");
455 		return -EIO;
456 	}
457 
458 	if (VMXNET3_VERSION_GE_7(hw)) {
459 		/* start with UPTv2 enabled to avoid ESXi issues */
460 		hw->uptv2_enabled = TRUE;
461 		eth_vmxnet3_setup_capabilities(hw, eth_dev);
462 	}
463 
464 	if (hw->used_DCR_capabilities[0] & (1 << VMXNET3_CAP_LARGE_BAR)) {
465 		hw->tx_prod_offset = VMXNET3_REG_LB_TXPROD;
466 		hw->rx_prod_offset[0] = VMXNET3_REG_LB_RXPROD;
467 		hw->rx_prod_offset[1] = VMXNET3_REG_LB_RXPROD2;
468 	} else {
469 		hw->tx_prod_offset = VMXNET3_REG_TXPROD;
470 		hw->rx_prod_offset[0] = VMXNET3_REG_RXPROD;
471 		hw->rx_prod_offset[1] = VMXNET3_REG_RXPROD2;
472 	}
473 
474 	/* Getting MAC Address */
475 	mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
476 	mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
477 	memcpy(hw->perm_addr, &mac_lo, 4);
478 	memcpy(hw->perm_addr + 4, &mac_hi, 2);
479 
480 	/* Allocate memory for storing MAC addresses */
481 	eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", RTE_ETHER_ADDR_LEN *
482 					       VMXNET3_MAX_MAC_ADDRS, 0);
483 	if (eth_dev->data->mac_addrs == NULL) {
484 		PMD_INIT_LOG(ERR,
485 			     "Failed to allocate %d bytes needed to store MAC addresses",
486 			     RTE_ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
487 		return -ENOMEM;
488 	}
489 	/* Copy the permanent MAC address */
490 	rte_ether_addr_copy((struct rte_ether_addr *)hw->perm_addr,
491 			&eth_dev->data->mac_addrs[0]);
492 
493 	PMD_INIT_LOG(DEBUG, "MAC Address : " RTE_ETHER_ADDR_PRT_FMT,
494 		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
495 		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
496 
497 	/* Put device in Quiesce Mode */
498 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
499 
500 	/* allow untagged pkts */
501 	VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);
502 
503 	hw->txdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
504 		eth_vmxnet3_txdata_get(hw) : sizeof(struct Vmxnet3_TxDataDesc);
505 
506 	hw->rxdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
507 		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
508 	RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
509 		   hw->rxdata_desc_size);
510 
511 	/* clear shadow stats */
512 	memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
513 	memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));
514 
515 	/* clear snapshot stats */
516 	memset(hw->snapshot_tx_stats, 0, sizeof(hw->snapshot_tx_stats));
517 	memset(hw->snapshot_rx_stats, 0, sizeof(hw->snapshot_rx_stats));
518 
519 	/* set the initial link status */
520 	memset(&link, 0, sizeof(link));
521 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
522 	link.link_speed = RTE_ETH_SPEED_NUM_10G;
523 	link.link_autoneg = RTE_ETH_LINK_FIXED;
524 	rte_eth_linkstatus_set(eth_dev, &link);
525 
526 	return 0;
527 }
528 
529 static int
530 eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
531 {
532 	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
533 
534 	PMD_INIT_FUNC_TRACE();
535 
536 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
537 		return 0;
538 
539 	if (hw->adapter_stopped == 0) {
540 		PMD_INIT_LOG(DEBUG, "Device has not been closed.");
541 		return -EBUSY;
542 	}
543 
544 	return 0;
545 }
546 
547 static int eth_vmxnet3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
548 	struct rte_pci_device *pci_dev)
549 {
550 	return rte_eth_dev_pci_generic_probe(pci_dev,
551 		sizeof(struct vmxnet3_hw), eth_vmxnet3_dev_init);
552 }
553 
554 static int eth_vmxnet3_pci_remove(struct rte_pci_device *pci_dev)
555 {
556 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_vmxnet3_dev_uninit);
557 }
558 
559 static struct rte_pci_driver rte_vmxnet3_pmd = {
560 	.id_table = pci_id_vmxnet3_map,
561 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
562 	.probe = eth_vmxnet3_pci_probe,
563 	.remove = eth_vmxnet3_pci_remove,
564 };
565 
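/*
 * Read the interrupt configuration advertised by the device and size the
 * vector count: one vector per Rx queue plus one for link events when MSI-X
 * with shared Tx/Rx vectors can be used, otherwise fall back to two vectors
 * with the queue interrupts disabled (link-state change only).
 */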
566 static void
567 vmxnet3_alloc_intr_resources(struct rte_eth_dev *dev)
568 {
569 	struct vmxnet3_hw *hw = dev->data->dev_private;
570 	uint32_t cfg;
571 	int nvec = 1; /* for link event */
572 
573 	/* intr settings */
574 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
575 			       VMXNET3_CMD_GET_CONF_INTR);
576 	cfg = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
577 	hw->intr.type = cfg & 0x3;
578 	hw->intr.mask_mode = (cfg >> 2) & 0x3;
579 
580 	if (hw->intr.type == VMXNET3_IT_AUTO)
581 		hw->intr.type = VMXNET3_IT_MSIX;
582 
583 	if (hw->intr.type == VMXNET3_IT_MSIX) {
584 		/* only support shared tx/rx intr */
585 		if (hw->num_tx_queues != hw->num_rx_queues)
586 			goto msix_err;
587 
588 		nvec += hw->num_rx_queues;
589 		hw->intr.num_intrs = nvec;
590 		return;
591 	}
592 
593 msix_err:
594 	/* the tx/rx queue interrupt will be disabled */
595 	hw->intr.num_intrs = 2;
596 	hw->intr.lsc_only = TRUE;
597 	PMD_INIT_LOG(INFO, "Enabled MSI-X with %d vectors", hw->intr.num_intrs);
598 }
599 
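/*
 * ethdev .dev_configure callback: validate the requested queue counts,
 * allocate the DriverShared and queue-descriptor memzones handed to the
 * device, optionally allocate the RSS configuration area, and size the
 * interrupt resources.
 */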
600 static int
601 vmxnet3_dev_configure(struct rte_eth_dev *dev)
602 {
603 	const struct rte_memzone *mz;
604 	struct vmxnet3_hw *hw = dev->data->dev_private;
605 	size_t size;
606 
607 	PMD_INIT_FUNC_TRACE();
608 
609 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
610 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
611 
612 	if (!VMXNET3_VERSION_GE_6(hw)) {
613 		if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
614 			PMD_INIT_LOG(ERR,
615 				     "ERROR: Number of rx queues not power of 2");
616 			return -EINVAL;
617 		}
618 	}
619 
620 	/* At this point, the number of queues requested has already
621 	 * been validated against the dev_info max queues by the ethdev layer.
622 	 */
623 	if (dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES ||
624 	    dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES) {
625 		hw->queuesExtEnabled = 1;
626 	} else {
627 		hw->queuesExtEnabled = 0;
628 	}
629 
630 	size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
631 		dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc);
632 
633 	if (size > UINT16_MAX)
634 		return -EINVAL;
635 
636 	hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
637 	hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;
638 
639 	/*
640 	 * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
641 	 * on current socket
642 	 */
643 	mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
644 			      "shared", rte_socket_id(), 8, 1);
645 
646 	if (mz == NULL) {
647 		PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
648 		return -ENOMEM;
649 	}
650 	memset(mz->addr, 0, mz->len);
651 
652 	hw->shared = mz->addr;
653 	hw->sharedPA = mz->iova;
654 
655 	/*
656 	 * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
657 	 * on current socket.
658 	 *
659 	 * We cannot reuse this memzone from previous allocation as its size
660 	 * depends on the number of tx and rx queues, which could be different
661 	 * from one config to another.
662 	 */
663 	mz = gpa_zone_reserve(dev, size, "queuedesc", rte_socket_id(),
664 			      VMXNET3_QUEUE_DESC_ALIGN, 0);
665 	if (mz == NULL) {
666 		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
667 		return -ENOMEM;
668 	}
669 	memset(mz->addr, 0, mz->len);
670 
671 	hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
672 	hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);
673 
674 	hw->queueDescPA = mz->iova;
675 	hw->queue_desc_len = (uint16_t)size;
676 
677 	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
678 		/* Allocate memory structure for UPT1_RSSConf and configure */
679 		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
680 				      "rss_conf", rte_socket_id(),
681 				      RTE_CACHE_LINE_SIZE, 1);
682 		if (mz == NULL) {
683 			PMD_INIT_LOG(ERR,
684 				     "ERROR: Creating rss_conf structure zone");
685 			return -ENOMEM;
686 		}
687 		memset(mz->addr, 0, mz->len);
688 
689 		hw->rss_conf = mz->addr;
690 		hw->rss_confPA = mz->iova;
691 	}
692 
693 	vmxnet3_alloc_intr_resources(dev);
694 
695 	return 0;
696 }
697 
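/* Program the unicast MAC address into the device MACL/MACH registers. */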
698 static void
699 vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
700 {
701 	uint32_t val;
702 
703 	PMD_INIT_LOG(DEBUG,
704 		     "Writing MAC Address : " RTE_ETHER_ADDR_PRT_FMT,
705 		     addr[0], addr[1], addr[2],
706 		     addr[3], addr[4], addr[5]);
707 
708 	memcpy(&val, addr, 4);
709 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);
710 
711 	memcpy(&val, addr + 4, 2);
712 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
713 }
714 
715 /*
716  * Configure the hardware to generate MSI-X interrupts.
717  * If setting up MSI-X fails, try setting up MSI (only 1 interrupt vector
718  * which will be disabled to allow lsc to work).
719  *
720  * Returns 0 on success and -1 otherwise.
721  */
722 static int
723 vmxnet3_configure_msix(struct rte_eth_dev *dev)
724 {
725 	struct vmxnet3_hw *hw = dev->data->dev_private;
726 	struct rte_intr_handle *intr_handle = dev->intr_handle;
727 	uint16_t intr_vector;
728 	int i;
729 
730 	hw->intr.event_intr_idx = 0;
731 
732 	/* only vfio-pci driver can support interrupt mode. */
733 	if (!rte_intr_cap_multiple(intr_handle) ||
734 	    dev->data->dev_conf.intr_conf.rxq == 0)
735 		return -1;
736 
737 	intr_vector = dev->data->nb_rx_queues;
738 	if (intr_vector > MAX_RX_QUEUES(hw)) {
739 		PMD_INIT_LOG(ERR, "At most %d intr queues supported",
740 			     MAX_RX_QUEUES(hw));
741 		return -ENOTSUP;
742 	}
743 
744 	if (rte_intr_efd_enable(intr_handle, intr_vector)) {
745 		PMD_INIT_LOG(ERR, "Failed to enable fastpath event fd");
746 		return -1;
747 	}
748 
749 	if (rte_intr_dp_is_en(intr_handle)) {
750 		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
751 						   dev->data->nb_rx_queues)) {
752 			PMD_INIT_LOG(ERR, "Failed to allocate %d Rx queues intr_vec",
753 					dev->data->nb_rx_queues);
754 			rte_intr_efd_disable(intr_handle);
755 			return -ENOMEM;
756 		}
757 	}
758 
759 	if (!rte_intr_allow_others(intr_handle) &&
760 	    dev->data->dev_conf.intr_conf.lsc != 0) {
761 		PMD_INIT_LOG(ERR, "not enough intr vector to support both Rx interrupt and LSC");
762 		rte_intr_vec_list_free(intr_handle);
763 		rte_intr_efd_disable(intr_handle);
764 		return -1;
765 	}
766 
767 	/* if we cannot allocate one MSI-X vector per queue, don't enable
768 	 * interrupt mode.
769 	 */
770 	if (hw->intr.num_intrs !=
771 				(rte_intr_nb_efd_get(intr_handle) + 1)) {
772 		PMD_INIT_LOG(ERR, "Device configured with %d Rx intr vectors, expecting %d",
773 				hw->intr.num_intrs,
774 				rte_intr_nb_efd_get(intr_handle) + 1);
775 		rte_intr_vec_list_free(intr_handle);
776 		rte_intr_efd_disable(intr_handle);
777 		return -1;
778 	}
779 
780 	for (i = 0; i < dev->data->nb_rx_queues; i++)
781 		if (rte_intr_vec_list_index_set(intr_handle, i, i + 1))
782 			return -rte_errno;
783 
784 	for (i = 0; i < hw->intr.num_intrs; i++)
785 		hw->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
786 
787 	PMD_INIT_LOG(INFO, "intr type %u, mode %u, %u vectors allocated",
788 		    hw->intr.type, hw->intr.mask_mode, hw->intr.num_intrs);
789 
790 	return 0;
791 }
792 
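/*
 * Describe the Rx mempool memory to the device as memory regions so the
 * backend can map the guest buffer memory up front. Mempools shared by
 * several queues are reported once, with a bitmask of the queues using them;
 * the result is placed in cmdInfo for the REGISTER_MEMREGS command issued
 * from dev_start.
 */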
793 static int
794 vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
795 {
796 	struct vmxnet3_hw *hw = dev->data->dev_private;
797 	Vmxnet3_DriverShared *shared = hw->shared;
798 	Vmxnet3_CmdInfo *cmdInfo;
799 	struct rte_mempool *mp[VMXNET3_MAX_RX_QUEUES];
800 	uint8_t index[VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES];
801 	uint32_t num, i, j, size;
802 
803 	if (hw->memRegsPA == 0) {
804 		const struct rte_memzone *mz;
805 
806 		size = sizeof(Vmxnet3_MemRegs) +
807 			(VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES) *
808 			sizeof(Vmxnet3_MemoryRegion);
809 
810 		mz = gpa_zone_reserve(dev, size, "memRegs", rte_socket_id(), 8,
811 				      1);
812 		if (mz == NULL) {
813 			PMD_INIT_LOG(ERR, "ERROR: Creating memRegs zone");
814 			return -ENOMEM;
815 		}
816 		memset(mz->addr, 0, mz->len);
817 		hw->memRegs = mz->addr;
818 		hw->memRegsPA = mz->iova;
819 	}
820 
821 	num = hw->num_rx_queues;
822 
823 	for (i = 0; i < num; i++) {
824 		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
825 
826 		mp[i] = rxq->mp;
827 		index[i] = 1 << i;
828 	}
829 
830 	/*
831 	 * The same mempool could be used by multiple queues. In such a case,
832 	 * remove duplicate mempool entries. Only one entry is kept with
833 	 * bitmask indicating queues that are using this mempool.
834 	 */
835 	for (i = 1; i < num; i++) {
836 		for (j = 0; j < i; j++) {
837 			if (mp[i] == mp[j]) {
838 				mp[i] = NULL;
839 				index[j] |= 1 << i;
840 				break;
841 			}
842 		}
843 	}
844 
845 	j = 0;
846 	for (i = 0; i < num; i++) {
847 		if (mp[i] == NULL)
848 			continue;
849 
850 		Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];
851 
852 		mr->startPA =
853 			(uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->iova;
854 		mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
855 			STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
856 		mr->txQueueBits = index[i];
857 		mr->rxQueueBits = index[i];
858 
859 		PMD_INIT_LOG(INFO,
860 			     "index: %u startPA: %" PRIu64 " length: %u, "
861 			     "rxBits: %x",
862 			     j, mr->startPA, mr->length, mr->rxQueueBits);
863 		j++;
864 	}
865 	hw->memRegs->numRegs = j;
866 	PMD_INIT_LOG(INFO, "numRegs: %u", j);
867 
868 	size = sizeof(Vmxnet3_MemRegs) +
869 		(j - 1) * sizeof(Vmxnet3_MemoryRegion);
870 
871 	cmdInfo = &shared->cu.cmdInfo;
872 	cmdInfo->varConf.confVer = 1;
873 	cmdInfo->varConf.confLen = size;
874 	cmdInfo->varConf.confPA = hw->memRegsPA;
875 
876 	return 0;
877 }
878 
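/*
 * Fill the Vmxnet3_DriverShared area read by the device at activation time:
 * driver and guest OS info, MTU, Tx/Rx queue descriptors, interrupt
 * moderation and vector mapping, and the requested offload/RSS/VLAN features.
 */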
879 static int
880 vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
881 {
882 	struct rte_eth_conf port_conf = dev->data->dev_conf;
883 	struct vmxnet3_hw *hw = dev->data->dev_private;
884 	struct rte_intr_handle *intr_handle = dev->intr_handle;
885 	uint32_t mtu = dev->data->mtu;
886 	Vmxnet3_DriverShared *shared = hw->shared;
887 	Vmxnet3_DSDevRead *devRead = &shared->devRead;
888 	struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
889 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
890 	uint32_t i;
891 	int ret;
892 
893 	hw->mtu = mtu;
894 
895 	shared->magic = VMXNET3_REV1_MAGIC;
896 	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
897 
898 	/* Setting up Guest OS information */
899 	devRead->misc.driverInfo.gos.gosBits   = sizeof(void *) == 4 ?
900 		VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64;
901 	devRead->misc.driverInfo.gos.gosType   = VMXNET3_GOS_TYPE_LINUX;
902 	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
903 	devRead->misc.driverInfo.uptVerSpt     = 1;
904 
905 	devRead->misc.mtu = rte_le_to_cpu_32(mtu);
906 	devRead->misc.queueDescPA  = hw->queueDescPA;
907 	devRead->misc.queueDescLen = hw->queue_desc_len;
908 	devRead->misc.numTxQueues  = hw->num_tx_queues;
909 	devRead->misc.numRxQueues  = hw->num_rx_queues;
910 
911 	for (i = 0; i < hw->num_tx_queues; i++) {
912 		Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
913 		vmxnet3_tx_queue_t *txq  = dev->data->tx_queues[i];
914 
915 		txq->shared = &hw->tqd_start[i];
916 
917 		tqd->ctrl.txNumDeferred  = 0;
918 		tqd->ctrl.txThreshold    = 1;
919 		tqd->conf.txRingBasePA   = txq->cmd_ring.basePA;
920 		tqd->conf.compRingBasePA = txq->comp_ring.basePA;
921 		tqd->conf.dataRingBasePA = txq->data_ring.basePA;
922 
923 		tqd->conf.txRingSize   = txq->cmd_ring.size;
924 		tqd->conf.compRingSize = txq->comp_ring.size;
925 		tqd->conf.dataRingSize = txq->data_ring.size;
926 		tqd->conf.txDataRingDescSize = txq->txdata_desc_size;
927 
928 		if (hw->intr.lsc_only)
929 			tqd->conf.intrIdx = 1;
930 		else
931 			tqd->conf.intrIdx =
932 				rte_intr_vec_list_index_get(intr_handle,
933 								   i);
934 		tqd->status.stopped = TRUE;
935 		tqd->status.error   = 0;
936 		memset(&tqd->stats, 0, sizeof(tqd->stats));
937 	}
938 
939 	for (i = 0; i < hw->num_rx_queues; i++) {
940 		Vmxnet3_RxQueueDesc *rqd  = &hw->rqd_start[i];
941 		vmxnet3_rx_queue_t *rxq   = dev->data->rx_queues[i];
942 
943 		rxq->shared = &hw->rqd_start[i];
944 
945 		rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
946 		rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
947 		rqd->conf.compRingBasePA  = rxq->comp_ring.basePA;
948 
949 		rqd->conf.rxRingSize[0]   = rxq->cmd_ring[0].size;
950 		rqd->conf.rxRingSize[1]   = rxq->cmd_ring[1].size;
951 		rqd->conf.compRingSize    = rxq->comp_ring.size;
952 
953 		if (VMXNET3_VERSION_GE_3(hw)) {
954 			rqd->conf.rxDataRingBasePA = rxq->data_ring.basePA;
955 			rqd->conf.rxDataRingDescSize = rxq->data_desc_size;
956 		}
957 
958 		if (hw->intr.lsc_only)
959 			rqd->conf.intrIdx = 1;
960 		else
961 			rqd->conf.intrIdx =
962 				rte_intr_vec_list_index_get(intr_handle,
963 								   i);
964 		rqd->status.stopped = TRUE;
965 		rqd->status.error   = 0;
966 		memset(&rqd->stats, 0, sizeof(rqd->stats));
967 	}
968 
969 	/* intr settings */
970 	if (VMXNET3_VERSION_GE_6(hw) && hw->queuesExtEnabled) {
971 		devReadExt->intrConfExt.autoMask = hw->intr.mask_mode ==
972 						   VMXNET3_IMM_AUTO;
973 		devReadExt->intrConfExt.numIntrs = hw->intr.num_intrs;
974 		for (i = 0; i < hw->intr.num_intrs; i++)
975 			devReadExt->intrConfExt.modLevels[i] =
976 				hw->intr.mod_levels[i];
977 
978 		devReadExt->intrConfExt.eventIntrIdx = hw->intr.event_intr_idx;
979 		devReadExt->intrConfExt.intrCtrl |=
980 			rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
981 	} else {
982 		devRead->intrConf.autoMask = hw->intr.mask_mode ==
983 					     VMXNET3_IMM_AUTO;
984 		devRead->intrConf.numIntrs = hw->intr.num_intrs;
985 		for (i = 0; i < hw->intr.num_intrs; i++)
986 			devRead->intrConf.modLevels[i] = hw->intr.mod_levels[i];
987 
988 		devRead->intrConf.eventIntrIdx = hw->intr.event_intr_idx;
989 		devRead->intrConf.intrCtrl |= rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
990 	}
991 
992 	/* Start with rxMode 0: no VMXNET3_RXM_xxx filter bits set yet */
993 	devRead->rxFilterConf.rxMode = 0;
994 
995 	/* Setting up feature flags */
996 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
997 		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
998 
999 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
1000 		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
1001 		devRead->misc.maxNumRxSG = 0;
1002 	}
1003 
1004 	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
1005 		ret = vmxnet3_rss_configure(dev);
1006 		if (ret != VMXNET3_SUCCESS)
1007 			return ret;
1008 
1009 		devRead->misc.uptFeatures |= VMXNET3_F_RSS;
1010 		devRead->rssConfDesc.confVer = 1;
1011 		devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
1012 		devRead->rssConfDesc.confPA  = hw->rss_confPA;
1013 	}
1014 
1015 	ret = vmxnet3_dev_vlan_offload_set(dev,
1016 			RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
1017 	if (ret)
1018 		return ret;
1019 
1020 	vmxnet3_write_mac(hw, dev->data->mac_addrs->addr_bytes);
1021 
1022 	return VMXNET3_SUCCESS;
1023 }
1024 
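/*
 * For vmxnet3 v7+ devices, report the Rx ring buffer sizes to the backend
 * with the SET_RING_BUFFER_SIZE command.
 */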
1025 static void
1026 vmxnet3_init_bufsize(struct vmxnet3_hw *hw)
1027 {
1028 	struct Vmxnet3_DriverShared *shared = hw->shared;
1029 	union Vmxnet3_CmdInfo *cmd_info = &shared->cu.cmdInfo;
1030 
1031 	if (!VMXNET3_VERSION_GE_7(hw))
1032 		return;
1033 
1034 	cmd_info->ringBufSize.ring1BufSizeType0 = hw->rxdata_buf_size;
1035 	cmd_info->ringBufSize.ring1BufSizeType1 = 0;
1036 	cmd_info->ringBufSize.ring2BufSizeType1 = hw->rxdata_buf_size;
1037 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1038 			       VMXNET3_CMD_SET_RING_BUFFER_SIZE);
1039 }
1040 
1041 /*
1042  * Configure device link speed and setup link.
1043  * Must be called after eth_vmxnet3_dev_init; otherwise it might fail.
1044  * It returns 0 on success.
1045  */
1046 static int
1047 vmxnet3_dev_start(struct rte_eth_dev *dev)
1048 {
1049 	int ret;
1050 	struct vmxnet3_hw *hw = dev->data->dev_private;
1051 	uint16_t i;
1052 
1053 	PMD_INIT_FUNC_TRACE();
1054 
1055 	/* Save stats before it is reset by CMD_ACTIVATE */
1056 	vmxnet3_hw_stats_save(hw);
1057 
1058 	/* configure MSI-X */
1059 	ret = vmxnet3_configure_msix(dev);
1060 	if (ret < 0) {
1061 		/* revert to lsc only */
1062 		hw->intr.num_intrs = 2;
1063 		hw->intr.lsc_only = TRUE;
1064 	}
1065 
1066 	ret = vmxnet3_setup_driver_shared(dev);
1067 	if (ret != VMXNET3_SUCCESS)
1068 		return ret;
1069 
1070 	/* Exchange shared data with device */
1071 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
1072 			       VMXNET3_GET_ADDR_LO(hw->sharedPA));
1073 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
1074 			       VMXNET3_GET_ADDR_HI(hw->sharedPA));
1075 
1076 	/* Activate device by register write */
1077 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
1078 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
1079 
1080 	if (ret != 0) {
1081 		PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
1082 		return -EINVAL;
1083 	}
1084 
1085 	/* Check memregs restrictions first */
1086 	if (dev->data->nb_rx_queues <= VMXNET3_MAX_RX_QUEUES &&
1087 	    dev->data->nb_tx_queues <= VMXNET3_MAX_TX_QUEUES) {
1088 		ret = vmxnet3_dev_setup_memreg(dev);
1089 		if (ret == 0) {
1090 			VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1091 					VMXNET3_CMD_REGISTER_MEMREGS);
1092 			ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
1093 			if (ret != 0)
1094 				PMD_INIT_LOG(DEBUG,
1095 					"Memory region registration command failed");
1096 			ret = 0;
1097 		} else {
1098 			PMD_INIT_LOG(DEBUG, "Failed to set up memory regions");
1099 		}
1100 	} else {
1101 		PMD_INIT_LOG(WARNING, "Memregs can't init (rx: %d, tx: %d)",
1102 			     dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1103 	}
1104 
1105 	if (VMXNET3_VERSION_GE_4(hw) &&
1106 	    dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
1107 		/* Check for additional RSS  */
1108 		ret = vmxnet3_v4_rss_configure(dev);
1109 		if (ret != VMXNET3_SUCCESS) {
1110 			PMD_INIT_LOG(ERR, "Failed to configure v4 RSS");
1111 			return ret;
1112 		}
1113 	}
1114 
1115 	/*
1116 	 * Load RX queues with blank mbufs and update next2fill index for device.
1117 	 * Update RxMode of the device.
1118 	 */
1119 	ret = vmxnet3_dev_rxtx_init(dev);
1120 	if (ret != VMXNET3_SUCCESS) {
1121 		PMD_INIT_LOG(ERR, "Device queue init: UNSUCCESSFUL");
1122 		return ret;
1123 	}
1124 
1125 	vmxnet3_init_bufsize(hw);
1126 
1127 	hw->adapter_stopped = FALSE;
1128 
1129 	/* Setting proper Rx Mode and issue Rx Mode Update command */
1130 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
1131 
1132 	/* Setup interrupt callback  */
1133 	rte_intr_callback_register(dev->intr_handle,
1134 				   vmxnet3_interrupt_handler, dev);
1135 
1136 	if (rte_intr_enable(dev->intr_handle) < 0) {
1137 		PMD_INIT_LOG(ERR, "interrupt enable failed");
1138 		return -EIO;
1139 	}
1140 
1141 	/* enable all intrs */
1142 	vmxnet3_enable_all_intrs(hw);
1143 
1144 	vmxnet3_process_events(dev);
1145 
1146 	/*
1147 	 * Update link state from device since this won't be
1148 	 * done upon starting with lsc in use. This is done
1149 	 * only after enabling interrupts to avoid any race
1150 	 * where the link state could change without an
1151 	 * interrupt being fired.
1152 	 */
1153 	__vmxnet3_dev_link_update(dev, 0);
1154 
1155 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1156 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
1157 	for (i = 0; i < dev->data->nb_tx_queues; i++)
1158 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
1159 
1160 	return VMXNET3_SUCCESS;
1161 }
1162 
1163 /*
1164  * Stop device: disable rx and tx functions to allow for reconfiguring.
1165  */
1166 static int
1167 vmxnet3_dev_stop(struct rte_eth_dev *dev)
1168 {
1169 	struct rte_eth_link link;
1170 	struct vmxnet3_hw *hw = dev->data->dev_private;
1171 	struct rte_intr_handle *intr_handle = dev->intr_handle;
1172 	uint16_t i;
1173 	int ret;
1174 
1175 	PMD_INIT_FUNC_TRACE();
1176 
1177 	if (hw->adapter_stopped == 1) {
1178 		PMD_INIT_LOG(DEBUG, "Device already stopped.");
1179 		return 0;
1180 	}
1181 
1182 	do {
1183 		/* Unregister has lock to make sure there is no running cb.
1184 		 * This has to happen first since vmxnet3_interrupt_handler
1185 		 * reenables interrupts by calling vmxnet3_enable_intr
1186 		 */
1187 		ret = rte_intr_callback_unregister(intr_handle,
1188 						   vmxnet3_interrupt_handler,
1189 						   (void *)-1);
1190 	} while (ret == -EAGAIN);
1191 
1192 	if (ret < 0)
1193 		PMD_DRV_LOG(ERR, "Error attempting to unregister intr cb: %d",
1194 			    ret);
1195 
1196 	PMD_INIT_LOG(DEBUG, "Disabled %d intr callbacks", ret);
1197 
1198 	/* disable interrupts */
1199 	vmxnet3_disable_all_intrs(hw);
1200 
1201 	rte_intr_disable(intr_handle);
1202 
1203 	/* Clean datapath event and queue/vector mapping */
1204 	rte_intr_efd_disable(intr_handle);
1205 	rte_intr_vec_list_free(intr_handle);
1206 
1207 	/* quiesce the device first */
1208 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
1209 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
1210 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);
1211 
1212 	/* reset the device */
1213 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
1214 	PMD_INIT_LOG(DEBUG, "Device reset.");
1215 
1216 	vmxnet3_dev_clear_queues(dev);
1217 
1218 	/* Clear recorded link status */
1219 	memset(&link, 0, sizeof(link));
1220 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1221 	link.link_speed = RTE_ETH_SPEED_NUM_10G;
1222 	link.link_autoneg = RTE_ETH_LINK_FIXED;
1223 	rte_eth_linkstatus_set(dev, &link);
1224 
1225 	hw->adapter_stopped = 1;
1226 	dev->data->dev_started = 0;
1227 
1228 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1229 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1230 	for (i = 0; i < dev->data->nb_tx_queues; i++)
1231 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1232 
1233 	return 0;
1234 }
1235 
1236 static void
1237 vmxnet3_free_queues(struct rte_eth_dev *dev)
1238 {
1239 	int i;
1240 
1241 	PMD_INIT_FUNC_TRACE();
1242 
1243 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1244 		vmxnet3_dev_rx_queue_release(dev, i);
1245 	dev->data->nb_rx_queues = 0;
1246 
1247 	for (i = 0; i < dev->data->nb_tx_queues; i++)
1248 		vmxnet3_dev_tx_queue_release(dev, i);
1249 	dev->data->nb_tx_queues = 0;
1250 }
1251 
1252 /*
1253  * Reset and stop device.
1254  */
1255 static int
1256 vmxnet3_dev_close(struct rte_eth_dev *dev)
1257 {
1258 	int ret;
1259 	PMD_INIT_FUNC_TRACE();
1260 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1261 		return 0;
1262 
1263 	ret = vmxnet3_dev_stop(dev);
1264 	vmxnet3_free_queues(dev);
1265 
1266 	return ret;
1267 }
1268 
1269 static int
1270 vmxnet3_dev_reset(struct rte_eth_dev *dev)
1271 {
1272 	int ret;
1273 
1274 	ret = eth_vmxnet3_dev_uninit(dev);
1275 	if (ret)
1276 		return ret;
1277 	ret = eth_vmxnet3_dev_init(dev);
1278 	return ret;
1279 }
1280 
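/*
 * Per-queue Tx counters: the device clears its statistics on activation, so
 * the running totals are the current hardware counters plus the values saved
 * by vmxnet3_hw_stats_save() before the last activation.
 */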
1281 static void
1282 vmxnet3_hw_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
1283 			struct UPT1_TxStats *res)
1284 {
1285 #define VMXNET3_UPDATE_TX_STAT(h, i, f, r)		\
1286 		((r)->f = (h)->tqd_start[(i)].stats.f +	\
1287 			(h)->saved_tx_stats[(i)].f)
1288 
1289 	VMXNET3_UPDATE_TX_STAT(hw, q, ucastPktsTxOK, res);
1290 	VMXNET3_UPDATE_TX_STAT(hw, q, mcastPktsTxOK, res);
1291 	VMXNET3_UPDATE_TX_STAT(hw, q, bcastPktsTxOK, res);
1292 	VMXNET3_UPDATE_TX_STAT(hw, q, ucastBytesTxOK, res);
1293 	VMXNET3_UPDATE_TX_STAT(hw, q, mcastBytesTxOK, res);
1294 	VMXNET3_UPDATE_TX_STAT(hw, q, bcastBytesTxOK, res);
1295 	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxError, res);
1296 	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxDiscard, res);
1297 
1298 #undef VMXNET3_UPDATE_TX_STAT
1299 }
1300 
1301 static void
1302 vmxnet3_hw_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
1303 			struct UPT1_RxStats *res)
1304 {
1305 #define VMXNET3_UPDATE_RX_STAT(h, i, f, r)		\
1306 		((r)->f = (h)->rqd_start[(i)].stats.f +	\
1307 			(h)->saved_rx_stats[(i)].f)
1308 
1309 	VMXNET3_UPDATE_RX_STAT(hw, q, ucastPktsRxOK, res);
1310 	VMXNET3_UPDATE_RX_STAT(hw, q, mcastPktsRxOK, res);
1311 	VMXNET3_UPDATE_RX_STAT(hw, q, bcastPktsRxOK, res);
1312 	VMXNET3_UPDATE_RX_STAT(hw, q, ucastBytesRxOK, res);
1313 	VMXNET3_UPDATE_RX_STAT(hw, q, mcastBytesRxOK, res);
1314 	VMXNET3_UPDATE_RX_STAT(hw, q, bcastBytesRxOK, res);
1315 	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxError, res);
1316 	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxOutOfBuf, res);
1317 
1318 #undef VMXNET3_UPDATE_RX_STAT
1319 }
1320 
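/*
 * Tx counters as exposed to the application: the running totals minus the
 * snapshot taken at the last stats_reset, which emulates a counter reset
 * without touching the hardware.
 */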
1321 static void
1322 vmxnet3_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
1323 					struct UPT1_TxStats *res)
1324 {
1325 		vmxnet3_hw_tx_stats_get(hw, q, res);
1326 
1327 #define VMXNET3_REDUCE_SNAPSHOT_TX_STAT(h, i, f, r)	\
1328 		((r)->f -= (h)->snapshot_tx_stats[(i)].f)
1329 
1330 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastPktsTxOK, res);
1331 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastPktsTxOK, res);
1332 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastPktsTxOK, res);
1333 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastBytesTxOK, res);
1334 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastBytesTxOK, res);
1335 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastBytesTxOK, res);
1336 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxError, res);
1337 	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxDiscard, res);
1338 
1339 #undef VMXNET3_REDUCE_SNAPSHOT_TX_STAT
1340 }
1341 
1342 static void
1343 vmxnet3_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
1344 					struct UPT1_RxStats *res)
1345 {
1346 		vmxnet3_hw_rx_stats_get(hw, q, res);
1347 
1348 #define VMXNET3_REDUCE_SNAPSHOT_RX_STAT(h, i, f, r)	\
1349 		((r)->f -= (h)->snapshot_rx_stats[(i)].f)
1350 
1351 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastPktsRxOK, res);
1352 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastPktsRxOK, res);
1353 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastPktsRxOK, res);
1354 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastBytesRxOK, res);
1355 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastBytesRxOK, res);
1356 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastBytesRxOK, res);
1357 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxError, res);
1358 	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxOutOfBuf, res);
1359 
1360 #undef VMXNET3_REDUCE_SNAPSHOT_RX_STAT
1361 }
1362 
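/*
 * Snapshot the hardware counters into saved_*_stats before CMD_ACTIVATE
 * clears them, so the totals remain monotonic across a stop/start cycle.
 */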
1363 static void
1364 vmxnet3_hw_stats_save(struct vmxnet3_hw *hw)
1365 {
1366 	unsigned int i;
1367 
1368 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
1369 
1370 	for (i = 0; i < hw->num_tx_queues; i++)
1371 		vmxnet3_hw_tx_stats_get(hw, i, &hw->saved_tx_stats[i]);
1372 	for (i = 0; i < hw->num_rx_queues; i++)
1373 		vmxnet3_hw_rx_stats_get(hw, i, &hw->saved_rx_stats[i]);
1374 }
1375 
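/*
 * Report the extended statistics names, one "rx_qN_*"/"tx_qN_*" entry per
 * configured queue; when the caller's array is too small, return the number
 * of entries required instead.
 */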
1376 static int
1377 vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
1378 			     struct rte_eth_xstat_name *xstats_names,
1379 			     unsigned int n)
1380 {
1381 	unsigned int i, t, count = 0;
1382 	unsigned int nstats =
1383 		dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
1384 		dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
1385 
1386 	if (!xstats_names || n < nstats)
1387 		return nstats;
1388 
1389 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1390 		if (!dev->data->rx_queues[i])
1391 			continue;
1392 
1393 		for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
1394 			snprintf(xstats_names[count].name,
1395 				 sizeof(xstats_names[count].name),
1396 				 "rx_q%u_%s", i,
1397 				 vmxnet3_rxq_stat_strings[t].name);
1398 			count++;
1399 		}
1400 	}
1401 
1402 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1403 		if (!dev->data->tx_queues[i])
1404 			continue;
1405 
1406 		for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
1407 			snprintf(xstats_names[count].name,
1408 				 sizeof(xstats_names[count].name),
1409 				 "tx_q%u_%s", i,
1410 				 vmxnet3_txq_stat_strings[t].name);
1411 			count++;
1412 		}
1413 	}
1414 
1415 	return count;
1416 }
1417 
1418 static int
1419 vmxnet3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1420 		       unsigned int n)
1421 {
1422 	unsigned int i, t, count = 0;
1423 	unsigned int nstats =
1424 		dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
1425 		dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
1426 
1427 	if (n < nstats)
1428 		return nstats;
1429 
1430 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1431 		struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
1432 
1433 		if (rxq == NULL)
1434 			continue;
1435 
1436 		for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
1437 			xstats[count].value = *(uint64_t *)(((char *)&rxq->stats) +
1438 				vmxnet3_rxq_stat_strings[t].offset);
1439 			xstats[count].id = count;
1440 			count++;
1441 		}
1442 	}
1443 
1444 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1445 		struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
1446 
1447 		if (txq == NULL)
1448 			continue;
1449 
1450 		for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
1451 			xstats[count].value = *(uint64_t *)(((char *)&txq->stats) +
1452 				vmxnet3_txq_stat_strings[t].offset);
1453 			xstats[count].id = count;
1454 			count++;
1455 		}
1456 	}
1457 
1458 	return count;
1459 }
1460 
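/*
 * Aggregate the per-queue UPT counters into the generic rte_eth_stats
 * structure (packets, bytes, errors, and out-of-buffer drops as imissed).
 */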
1461 static int
1462 vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1463 {
1464 	unsigned int i;
1465 	struct vmxnet3_hw *hw = dev->data->dev_private;
1466 	struct UPT1_TxStats txStats;
1467 	struct UPT1_RxStats rxStats;
1468 
1469 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
1470 
1471 	for (i = 0; i < hw->num_tx_queues; i++) {
1472 		vmxnet3_tx_stats_get(hw, i, &txStats);
1473 
1474 		stats->q_opackets[i] = txStats.ucastPktsTxOK +
1475 			txStats.mcastPktsTxOK +
1476 			txStats.bcastPktsTxOK;
1477 
1478 		stats->q_obytes[i] = txStats.ucastBytesTxOK +
1479 			txStats.mcastBytesTxOK +
1480 			txStats.bcastBytesTxOK;
1481 
1482 		stats->opackets += stats->q_opackets[i];
1483 		stats->obytes += stats->q_obytes[i];
1484 		stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
1485 	}
1486 
1487 	for (i = 0; i < hw->num_rx_queues; i++) {
1488 		vmxnet3_rx_stats_get(hw, i, &rxStats);
1489 
1490 		stats->q_ipackets[i] = rxStats.ucastPktsRxOK +
1491 			rxStats.mcastPktsRxOK +
1492 			rxStats.bcastPktsRxOK;
1493 
1494 		stats->q_ibytes[i] = rxStats.ucastBytesRxOK +
1495 			rxStats.mcastBytesRxOK +
1496 			rxStats.bcastBytesRxOK;
1497 
1498 		stats->ipackets += stats->q_ipackets[i];
1499 		stats->ibytes += stats->q_ibytes[i];
1500 
1501 		stats->q_errors[i] = rxStats.pktsRxError;
1502 		stats->ierrors += rxStats.pktsRxError;
1503 		stats->imissed += rxStats.pktsRxOutOfBuf;
1504 	}
1505 
1506 	return 0;
1507 }
1508 
1509 static int
1510 vmxnet3_dev_stats_reset(struct rte_eth_dev *dev)
1511 {
1512 	unsigned int i;
1513 	struct vmxnet3_hw *hw = dev->data->dev_private;
1514 	struct UPT1_TxStats txStats = {0};
1515 	struct UPT1_RxStats rxStats = {0};
1516 
1517 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
1518 
1519 	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
1520 
1521 	for (i = 0; i < hw->num_tx_queues; i++) {
1522 		vmxnet3_hw_tx_stats_get(hw, i, &txStats);
1523 		memcpy(&hw->snapshot_tx_stats[i], &txStats,
1524 			sizeof(hw->snapshot_tx_stats[0]));
1525 	}
1526 	for (i = 0; i < hw->num_rx_queues; i++) {
1527 		vmxnet3_hw_rx_stats_get(hw, i, &rxStats);
1528 		memcpy(&hw->snapshot_rx_stats[i], &rxStats,
1529 			sizeof(hw->snapshot_rx_stats[0]));
1530 	}
1531 
1532 	return 0;
1533 }
1534 
1535 static int
1536 vmxnet3_dev_info_get(struct rte_eth_dev *dev,
1537 		     struct rte_eth_dev_info *dev_info)
1538 {
1539 	struct vmxnet3_hw *hw = dev->data->dev_private;
1540 	int queues = 0;
1541 
1542 	if (VMXNET3_VERSION_GE_6(hw)) {
1543 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1544 				       VMXNET3_CMD_GET_MAX_QUEUES_CONF);
1545 		queues = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
1546 
1547 		if (queues > 0) {
1548 			dev_info->max_rx_queues =
1549 			  RTE_MIN(VMXNET3_EXT_MAX_RX_QUEUES, ((queues >> 8) & 0xff));
1550 			dev_info->max_tx_queues =
1551 			  RTE_MIN(VMXNET3_EXT_MAX_TX_QUEUES, (queues & 0xff));
1552 		} else {
1553 			dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
1554 			dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
1555 		}
1556 	} else {
1557 		dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
1558 		dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
1559 	}
1560 
1561 	dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
1562 	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
1563 	dev_info->min_mtu = VMXNET3_MIN_MTU;
1564 	dev_info->max_mtu = VMXNET3_MAX_MTU;
1565 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
1566 	dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
1567 
1568 	dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
1569 
1570 	if (VMXNET3_VERSION_GE_4(hw)) {
1571 		dev_info->flow_type_rss_offloads |= VMXNET3_V4_RSS_MASK;
1572 	}
1573 
1574 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1575 		.nb_max = VMXNET3_RX_RING_MAX_SIZE,
1576 		.nb_min = VMXNET3_DEF_RX_RING_SIZE,
1577 		.nb_align = 1,
1578 	};
1579 
1580 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1581 		.nb_max = VMXNET3_TX_RING_MAX_SIZE,
1582 		.nb_min = VMXNET3_DEF_TX_RING_SIZE,
1583 		.nb_align = 1,
1584 		.nb_seg_max = VMXNET3_TX_MAX_SEG,
1585 		.nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
1586 	};
1587 
1588 	dev_info->rx_offload_capa = VMXNET3_RX_OFFLOAD_CAP;
1589 	dev_info->rx_queue_offload_capa = 0;
1590 	dev_info->tx_offload_capa = VMXNET3_TX_OFFLOAD_CAP;
1591 	dev_info->tx_queue_offload_capa = 0;
1592 	if (hw->rss_conf == NULL) {
1593 		/* RSS not configured */
1594 		dev_info->reta_size = 0;
1595 	} else {
1596 		dev_info->reta_size = hw->rss_conf->indTableSize;
1597 	}
1598 	return 0;
1599 }
1600 
1601 static int
1602 vmxnet3_hw_ver_get(struct rte_eth_dev *dev,
1603 		   char *fw_version, size_t fw_size)
1604 {
1605 	int ret;
1606 	struct vmxnet3_hw *hw = dev->data->dev_private;
1607 
1608 	ret = snprintf(fw_version, fw_size, "v%d", hw->version);
1609 
1610 	ret += 1; /* add the size of '\0' */
1611 	if (fw_size < (uint32_t)ret)
1612 		return ret;
1613 	else
1614 		return 0;
1615 }
1616 
1617 static const uint32_t *
1618 vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1619 {
1620 	static const uint32_t ptypes[] = {
1621 		RTE_PTYPE_L3_IPV4_EXT,
1622 		RTE_PTYPE_L3_IPV4,
1623 		RTE_PTYPE_UNKNOWN
1624 	};
1625 
1626 	if (dev->rx_pkt_burst == vmxnet3_recv_pkts)
1627 		return ptypes;
1628 	return NULL;
1629 }
1630 
1631 static int
1632 vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
1633 {
1634 	struct vmxnet3_hw *hw = dev->data->dev_private;
1635 
1636 	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)(hw->perm_addr));
1637 	vmxnet3_write_mac(hw, mac_addr->addr_bytes);
1638 	return 0;
1639 }
1640 
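/*
 * Validate the requested MTU against the version-dependent maximum and, if
 * the port is already running, do a quick stop/start so the new MTU is
 * passed to the backend.
 */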
1641 static int
1642 vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1643 {
1644 	struct vmxnet3_hw *hw = dev->data->dev_private;
1645 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
1646 
1647 	if (mtu < VMXNET3_MIN_MTU)
1648 		return -EINVAL;
1649 
1650 	if (VMXNET3_VERSION_GE_6(hw)) {
1651 		if (mtu > VMXNET3_V6_MAX_MTU)
1652 			return -EINVAL;
1653 	} else {
1654 		if (mtu > VMXNET3_MAX_MTU) {
1655 			PMD_DRV_LOG(ERR, "MTU %d too large in device version v%d",
1656 				    mtu, hw->version);
1657 			return -EINVAL;
1658 		}
1659 	}
1660 
1661 	dev->data->mtu = mtu;
1662 	/* update max frame size */
1663 	dev->data->dev_conf.rxmode.mtu = frame_size;
1664 
1665 	if (dev->data->dev_started == 0)
1666 		return 0;
1667 
1668 	/* Changing the MTU for the vmxnet3 PMD does not require a restart,
1669 	 * as it does not need to repopulate the Rx rings to support a
1670 	 * different MTU size. We stop and restart the device here only to
1671 	 * pass the new MTU to the backend.
1672 	 */
1673 	vmxnet3_dev_stop(dev);
1674 	vmxnet3_dev_start(dev);
1675 
1676 	return 0;
1677 }
1678 
1679 /* return 0 means link status changed, -1 means not changed */
1680 static int
1681 __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
1682 			  __rte_unused int wait_to_complete)
1683 {
1684 	struct vmxnet3_hw *hw = dev->data->dev_private;
1685 	struct rte_eth_link link;
1686 	uint32_t ret;
1687 
1688 	memset(&link, 0, sizeof(link));
1689 
1690 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
1691 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
1692 
1693 	if (ret & 0x1)
1694 		link.link_status = RTE_ETH_LINK_UP;
1695 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1696 	link.link_speed = RTE_ETH_SPEED_NUM_10G;
1697 	link.link_autoneg = RTE_ETH_LINK_FIXED;
1698 
1699 	return rte_eth_linkstatus_set(dev, &link);
1700 }
1701 
1702 static int
1703 vmxnet3_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1704 {
1705 	/* Link status doesn't change for stopped dev */
1706 	if (dev->data->dev_started == 0)
1707 		return -1;
1708 
1709 	return __vmxnet3_dev_link_update(dev, wait_to_complete);
1710 }
1711 
1712 /* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
1713 static void
1714 vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
1715 {
1716 	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
1717 
1718 	if (set)
1719 		rxConf->rxMode = rxConf->rxMode | feature;
1720 	else
1721 		rxConf->rxMode = rxConf->rxMode & (~feature);
1722 
1723 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
1724 }
1725 
1726 /* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
1727 static int
1728 vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
1729 {
1730 	struct vmxnet3_hw *hw = dev->data->dev_private;
1731 	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
1732 
1733 	memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
1734 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
1735 
1736 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1737 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1738 
1739 	return 0;
1740 }
1741 
1742 /* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
1743 static int
1744 vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
1745 {
1746 	struct vmxnet3_hw *hw = dev->data->dev_private;
1747 	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
1748 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
1749 
1750 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1751 		memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
1752 	else
1753 		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
1754 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
1755 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1756 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1757 
1758 	return 0;
1759 }
1760 
1761 /* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
1762 static int
1763 vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
1764 {
1765 	struct vmxnet3_hw *hw = dev->data->dev_private;
1766 
1767 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
1768 
1769 	return 0;
1770 }
1771 
1772 /* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
1773 static int
1774 vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
1775 {
1776 	struct vmxnet3_hw *hw = dev->data->dev_private;
1777 
1778 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
1779 
1780 	return 0;
1781 }
1782 
1783 /* Enable/disable filter on vlan */
1784 static int
1785 vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
1786 {
1787 	struct vmxnet3_hw *hw = dev->data->dev_private;
1788 	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
1789 	uint32_t *vf_table = rxConf->vfTable;
1790 
1791 	/* save state for restore */
1792 	if (on)
1793 		VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
1794 	else
1795 		VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);
1796 
1797 	/* don't change active filter if in promiscuous mode */
1798 	if (rxConf->rxMode & VMXNET3_RXM_PROMISC)
1799 		return 0;
1800 
1801 	/* set in hardware */
1802 	if (on)
1803 		VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
1804 	else
1805 		VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);
1806 
1807 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1808 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1809 	return 0;
1810 }
1811 
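/* Apply VLAN strip and/or VLAN filter offload changes to the device */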
1812 static int
1813 vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1814 {
1815 	struct vmxnet3_hw *hw = dev->data->dev_private;
1816 	Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
1817 	uint32_t *vf_table = devRead->rxFilterConf.vfTable;
1818 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
1819 
1820 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1821 		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1822 			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
1823 		else
1824 			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
1825 
1826 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1827 				       VMXNET3_CMD_UPDATE_FEATURE);
1828 	}
1829 
1830 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
1831 		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1832 			memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
1833 		else
1834 			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
1835 
1836 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1837 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1838 	}
1839 
1840 	return 0;
1841 }
1842 
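/* Process device events reported through the event cause register (ECR) */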
1843 static void
1844 vmxnet3_process_events(struct rte_eth_dev *dev)
1845 {
1846 	struct vmxnet3_hw *hw = dev->data->dev_private;
1847 	uint32_t events = hw->shared->ecr;
1848 	int i;
1849 
1850 	if (!events)
1851 		return;
1852 
1853 	/*
1854 	 * ECR bits when written with 1b are cleared. Hence write
1855 	 * events back to ECR so that the bits which were set will be reset.
1856 	 */
1857 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);
1858 
1859 	/* Check if link state has changed */
1860 	if (events & VMXNET3_ECR_LINK) {
1861 		PMD_DRV_LOG(DEBUG, "Process events: VMXNET3_ECR_LINK event");
1862 		if (vmxnet3_dev_link_update(dev, 0) == 0)
1863 			rte_eth_dev_callback_process(dev,
1864 						     RTE_ETH_EVENT_INTR_LSC,
1865 						     NULL);
1866 	}
1867 
1868 	/* Check if there is an error on xmit/recv queues */
1869 	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
1870 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1871 				       VMXNET3_CMD_GET_QUEUE_STATUS);
1872 
1873 		PMD_DRV_LOG(ERR, "queue error event 0x%x for "
1874 			    RTE_ETHER_ADDR_PRT_FMT, events,
1875 			    hw->perm_addr[0], hw->perm_addr[1],
1876 			    hw->perm_addr[2], hw->perm_addr[3],
1877 			    hw->perm_addr[4], hw->perm_addr[5]);
1878 
1879 		for (i = 0; i < hw->num_tx_queues; i++) {
1880 			if (hw->tqd_start[i].status.stopped)
1881 				PMD_DRV_LOG(ERR, "tq %d error 0x%x",
1882 					    i, hw->tqd_start[i].status.error);
1883 		}
1884 		for (i = 0; i < hw->num_rx_queues; i++) {
1885 			if (hw->rqd_start[i].status.stopped)
1886 				PMD_DRV_LOG(ERR, "rq %d error 0x%x",
1887 					    i, hw->rqd_start[i].status.error);
1888 		}
1889 
1890 		/* Have to reset the device */
1891 		/* Notify the application so that it can reset the device */
1892 		rte_eth_dev_callback_process(dev,
1893 					     RTE_ETH_EVENT_INTR_RESET,
1894 					     NULL);
1895 	}
1896 
1897 	if (events & VMXNET3_ECR_DIC)
1898 		PMD_DRV_LOG(DEBUG, "Device implementation change event.");
1899 
1900 	if (events & VMXNET3_ECR_DEBUG)
1901 		PMD_DRV_LOG(DEBUG, "Debug event generated by device.");
1902 }
1903 
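/* Event interrupt handler: mask the event interrupt, handle any pending
 * ECR events, then unmask it again.
 */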
1904 static void
1905 vmxnet3_interrupt_handler(void *param)
1906 {
1907 	struct rte_eth_dev *dev = param;
1908 	struct vmxnet3_hw *hw = dev->data->dev_private;
1909 	uint32_t events;
1910 	uint8 *eventIntrIdx;
1911 	uint32 *intrCtrl;
1912 
1913 	PMD_INIT_FUNC_TRACE();
1914 
1915 	vmxnet3_get_intr_ctrl_ev(hw, &eventIntrIdx, &intrCtrl);
1916 	vmxnet3_disable_intr(hw, *eventIntrIdx);
1917 
1918 	events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
1919 	if (events == 0)
1920 		goto done;
1921 
1922 	PMD_DRV_LOG(DEBUG, "Reading events: 0x%X", events);
1923 	vmxnet3_process_events(dev);
1924 done:
1925 	vmxnet3_enable_intr(hw, *eventIntrIdx);
1926 }
1927 
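/* Unmask the interrupt vector mapped to the given Rx queue */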
1928 static int
1929 vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1930 {
1931 	struct vmxnet3_hw *hw = dev->data->dev_private;
1932 
1933 	vmxnet3_enable_intr(hw,
1934 			    rte_intr_vec_list_index_get(dev->intr_handle,
1935 							       queue_id));
1936 
1937 	return 0;
1938 }
1939 
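/* Mask the interrupt vector mapped to the given Rx queue */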
1940 static int
1941 vmxnet3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1942 {
1943 	struct vmxnet3_hw *hw = dev->data->dev_private;
1944 
1945 	vmxnet3_disable_intr(hw,
1946 		rte_intr_vec_list_index_get(dev->intr_handle, queue_id));
1947 
1948 	return 0;
1949 }
1950 
1951 RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
1952 RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
1953 RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");
1954 RTE_LOG_REGISTER_SUFFIX(vmxnet3_logtype_init, init, NOTICE);
1955 RTE_LOG_REGISTER_SUFFIX(vmxnet3_logtype_driver, driver, NOTICE);
1956 
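/* Update the RSS indirection table entries selected by the mask and push
 * the new table to the device.
 */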
1957 static int
1958 vmxnet3_rss_reta_update(struct rte_eth_dev *dev,
1959 			struct rte_eth_rss_reta_entry64 *reta_conf,
1960 			uint16_t reta_size)
1961 {
1962 	int i, idx, shift;
1963 	struct vmxnet3_hw *hw = dev->data->dev_private;
1964 	struct VMXNET3_RSSConf *dev_rss_conf = hw->rss_conf;
1965 
1966 	if (reta_size != dev_rss_conf->indTableSize) {
1967 		PMD_DRV_LOG(ERR,
1968 			"The configured hash lookup table size (%d) does not match "
1969 			"the size supported by the device (%d)",
1970 			reta_size, dev_rss_conf->indTableSize);
1971 		return -EINVAL;
1972 	}
1973 
1974 	for (i = 0; i < reta_size; i++) {
1975 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1976 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1977 		if (reta_conf[idx].mask & RTE_BIT64(shift))
1978 			dev_rss_conf->indTable[i] = (uint8_t)reta_conf[idx].reta[shift];
1979 	}
1980 
1981 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
1982 				VMXNET3_CMD_UPDATE_RSSIDT);
1983 
1984 	return 0;
1985 }
1986 
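/* Return the RSS indirection table entries selected by the mask */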
1987 static int
1988 vmxnet3_rss_reta_query(struct rte_eth_dev *dev,
1989 		       struct rte_eth_rss_reta_entry64 *reta_conf,
1990 		       uint16_t reta_size)
1991 {
1992 	int i, idx, shift;
1993 	struct vmxnet3_hw *hw = dev->data->dev_private;
1994 	struct VMXNET3_RSSConf *dev_rss_conf = hw->rss_conf;
1995 
1996 	if (reta_size != dev_rss_conf->indTableSize) {
1997 		PMD_DRV_LOG(ERR,
1998 			"The requested hash lookup table size (%d) does not "
1999 			"match the configured size (%d)",
2000 			reta_size, dev_rss_conf->indTableSize);
2001 		return -EINVAL;
2002 	}
2003 
2004 	for (i = 0; i < reta_size; i++) {
2005 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2006 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2007 		if (reta_conf[idx].mask & RTE_BIT64(shift))
2008 			reta_conf[idx].reta[shift] = dev_rss_conf->indTable[i];
2009 	}
2010 
2011 	return 0;
2012 }
2013