/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"
#include "ngbe_regs_group.h"

static const struct reg_info ngbe_regs_general[] = {
	{NGBE_RST, 1, 1, "NGBE_RST"},
	{NGBE_STAT, 1, 1, "NGBE_STAT"},
	{NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
	{NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
	{NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
	{NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_nvm[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_interrupt[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_fctl_others[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rxdma[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_tx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_wakeup[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_mac[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_diagnostic[] = {
	{0, 0, 0, ""},
};

/* PF registers */
static const struct reg_info *ngbe_regs_others[] = {
				ngbe_regs_general,
				ngbe_regs_nvm,
				ngbe_regs_interrupt,
				ngbe_regs_fctl_others,
				ngbe_regs_rxdma,
				ngbe_regs_rx,
				ngbe_regs_tx,
				ngbe_regs_wakeup,
				ngbe_regs_mac,
				ngbe_regs_diagnostic,
				NULL};

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
					uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);
static void ngbe_pbthresh_set(struct rte_eth_dev *dev);

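/* Helpers to set, clear, and query one queue's bit in the per-port VLAN
 * hardware-strip bitmap; NBBY is the number of bits per byte.
 */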
#define NGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_TXD_ALIGN,
	.nb_seg_max = NGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

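/* Each entry pairs an xstat name with its field offset in struct ngbe_hw_stats */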
#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
	/* MNG RxTx */
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),
	/* Basic RxTx */
	HW_XSTAT(rx_packets),
	HW_XSTAT(tx_packets),
	HW_XSTAT(rx_bytes),
	HW_XSTAT(tx_bytes),
	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(tx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(tx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),
	HW_XSTAT(rx_dma_drop),
	HW_XSTAT(tx_dma_drop),
	HW_XSTAT(tx_secdrp_packets),

	/* Basic Error */
	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_cnt),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	/* PB Stats */
	HW_XSTAT(rx_up_dropped),
	HW_XSTAT(rdb_pkt_cnt),
	HW_XSTAT(rdb_repli_cnt),
	HW_XSTAT(rdb_drp_cnt),

	/* MACSEC */
	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	/* MAC RxTx */
	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	/* Flow Control */
	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
			   sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
			   sizeof(rte_ngbe_qp_strings[0]))

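/* Reset the MAC, then re-assert DRVLOAD and RSTDONE in PORTCTL so firmware
 * knows the driver is loaded and PF/VF mailbox operations can resume;
 * a missing SFP is treated as non-fatal here.
 */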
static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	if (status == NGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}

static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	wr32(hw, NGBE_IENMISC, intr->mask_misc);
	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
	ngbe_flush(hw);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
	ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These locks are trickier since they are common to all ports; but
	 * the retries in swfw_sync last long enough (1 s) to be almost sure
	 * that, if the lock cannot be taken, it is due to an improper hold
	 * of the semaphore.
	 */
	mask = NGBE_MNGSEM_SWPHY |
	       NGBE_MNGSEM_SWMBX |
	       NGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	u32 led_conf = 0;
	int err, ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ngbe_eth_dev_ops;
	eth_dev->rx_queue_count       = ngbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * Rx and Tx function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ngbe_tx_queue *txq;
		/* The Tx function used in the primary process was set by the
		 * last queue initialized; Tx queues may not have been
		 * initialized by the primary process yet.
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			ngbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default Tx function if we get here */
			PMD_INIT_LOG(NOTICE,
				"No Tx queues configured yet. Using default Tx function.");
		}

		ngbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->back = pci_dev;
	hw->port_id = eth_dev->data->port_id;
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	if (pci_dev->id.subsystem_vendor_id == PCI_VENDOR_ID_WANGXUN) {
		hw->sub_system_id = pci_dev->id.subsystem_device_id;
	} else {
		u32 ssid = 0;

		err = ngbe_flash_read_dword(hw, 0xFFFDC, &ssid);
		if (err) {
			PMD_INIT_LOG(ERR,
				"Read of internal subsystem device id failed");
			return -ENODEV;
		}
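		/* The subsystem ID read from flash is byte-swapped; swap the
		 * low two bytes back into host order.
		 */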
		hw->sub_system_id = (u16)ssid >> 8 | (u16)ssid << 8;
	}
	ngbe_map_device_id(hw);

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = ngbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);
	ngbe_set_ncsi_status(hw);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ngbe_fc_full;
	hw->fc.current_mode = ngbe_fc_full;
	hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
	hw->fc.low_water = NGBE_FC_XON_LOTH;
	hw->fc.high_water = NGBE_FC_XOFF_HITH;
	hw->fc.send_xon = 1;

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, NULL);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->phy.led_oem_chk(hw, &led_conf);
	if (err == 0)
		hw->led_conf = led_conf;
	else
		hw->led_conf = 0xFFFF;

	err = hw->mac.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	ngbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ngbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ret = ngbe_pf_host_init(eth_dev);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		return ret;
	}

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			(int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ngbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ngbe_enable_intr(eth_dev);

	return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_dev_close(eth_dev);

	return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct ngbe_adapter),
			eth_dev_pci_specific_init, pci_dev,
			eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (ethdev == NULL)
		return 0;

	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
	.id_table = pci_id_ngbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ngbe_pci_probe,
	.remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

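	/* The VLAN filter table is an array of 32-bit words: bits [11:5] of
	 * the VLAN ID select the word, bits [4:0] select the bit within it.
	 */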
	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
	vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

static void
ngbe_vlan_strip_q_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	if (on)
		ngbe_vlan_hw_strip_enable(dev, queue);
	else
		ngbe_vlan_hw_strip_disable(dev, queue);
}

static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	if (!hw->adapter_stopped) {
		PMD_DRV_LOG(ERR, "Please stop port first");
		return;
	}

	ngbe_vlan_strip_q_set(dev, queue, on);
}

static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, NGBE_PORTCTL);

	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
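	/* Double VLAN (QinQ) is only in effect when both VLANEXT and QINQ
	 * are set in PORTCTL.
	 */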
	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR,
				"Inner type is not supported by single VLAN");
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_LSB_MASK,
				NGBE_TAGTPID_LSB(tpid));
		}
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16 bits are valid */
			wr32m(hw, NGBE_EXTAG,
				NGBE_EXTAG_VLAN_MASK,
				NGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_MSB_MASK,
				NGBE_TAGTPID_MSB(tpid));
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}

void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);
}

void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_CFIENA;
	vlnctrl |= NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < NGBE_VFTA_SIZE; i++)
		wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
	struct ngbe_rx_queue *rxq;

	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		NGBE_SET_HWSTRIP(hwstrip, queue);
	else
		NGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl &= ~NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record this setting in the per-queue HW strip bitmap */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl |= NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record this setting in the per-queue HW strip bitmap */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_VLANEXT;
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl  = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl  = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct ngbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			ngbe_vlan_strip_q_set(dev, i, 1);
		else
			ngbe_vlan_strip_q_set(dev, i, 0);
	}
}

void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct ngbe_rx_queue *rxq;

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & RTE_ETH_VLAN_STRIP_MASK)
		ngbe_vlan_hw_strip_config(dev);

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			ngbe_vlan_hw_filter_enable(dev);
		else
			ngbe_vlan_hw_filter_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
			ngbe_vlan_hw_extend_enable(dev);
		else
			ngbe_vlan_hw_extend_disable(dev);
	}

	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
			ngbe_qinq_hw_strip_enable(dev);
		else
			ngbe_qinq_hw_strip_disable(dev);
	}

	return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	if (!hw->adapter_stopped && (mask & RTE_ETH_VLAN_STRIP_MASK)) {
		PMD_DRV_LOG(ERR, "Please stop port first");
		return -EPERM;
	}

	ngbe_config_vlan_strip_on_all_queues(dev, mask);

	ngbe_vlan_offload_config(dev, mask);

	return 0;
}

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	/* set flag to update link status after init */
	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk-allocation
	 * preconditions, we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;
	adapter->rx_vec_allowed = true;

	return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
	else
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

	intr->mask_misc |= NGBE_ICRMISC_GPIO | NGBE_ICRMISC_HEAT;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = false;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;

	/* reinitialize adapter, this calls reset and start */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = ngbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	ngbe_set_pcie_master(hw, true);

	/* configure PF module if SRIOV enabled */
	ngbe_pf_host_configure(dev);

	ngbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
						   dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate %d rx_queues intr_vec",
				     dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until Rx interrupt */
	ngbe_configure_msix(dev);

	/* initialize transmission unit */
	ngbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ngbe_dev_rx_init(dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
		goto error;
	}

	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
		RTE_ETH_VLAN_EXTEND_MASK;
	err = ngbe_vlan_offload_config(dev, mask);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	hw->mac.setup_pba(hw);
	ngbe_pbthresh_set(dev);
	ngbe_configure_port(dev);

	err = ngbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	hw->lsc = dev->data->dev_conf.intr_conf.lsc;

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err != 0)
		goto error;
	dev->data->dev_link.link_status = link_up;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		negotiate = true;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err != 0)
		goto error;

	allowed_speeds = 0;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

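	/* Translate the requested link_speeds bitmap into the hardware speed
	 * mask; autonegotiation falls back to the MAC's default speed set.
	 */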
	speed = 0x0;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = hw->mac.default_speeds;
	} else {
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= NGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= NGBE_LINK_SPEED_100M_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
			speed |= NGBE_LINK_SPEED_10M_FULL;
	}

	if (!hw->ncsi_enabled) {
		err = hw->phy.init_hw(hw);
		if (err != 0) {
			PMD_INIT_LOG(ERR, "PHY init failed");
			goto error;
		}
	}
	err = hw->mac.setup_link(hw, speed, link_up);
	if (err != 0)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		ngbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
		ngbe_dev_macsec_interrupt_setup(dev);
		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     ngbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO,
				     "LSC won't enable because of no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		ngbe_dev_rxq_interrupt_setup(dev);

	/* enable UIO/VFIO intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since HW reset */
	ngbe_enable_intr(dev);

	if (hw->gpio_ctl) {
		/* GPIO0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, 0);
	}

	/*
	 * Update the link status right before returning, because it may
	 * start the link configuration process in a separate thread.
	 */
	ngbe_dev_link_update(dev, 0);

	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	ngbe_dev_clear_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int vf;

	if (hw->adapter_stopped)
		goto out;

	PMD_INIT_FUNC_TRACE();

	if (hw->gpio_ctl) {
		/* GPIO0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
	}

	/* disable interrupts */
	ngbe_disable_intr(hw);

	/* reset the NIC */
	ngbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	ngbe_stop_hw(hw);

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	ngbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ngbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	ngbe_set_pcie_master(hw, true);

	adapter->rss_reta_updated = 0;

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

out:
	/* close phy to prevent reset in dev_close from restarting physical link */
	if (!(hw->wol_enabled || hw->ncsi_enabled))
		hw->phy.set_phy_power(hw, false);

	return 0;
}

/*
 * Set device link up: power on.
 */
static int
ngbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	if (!(hw->ncsi_enabled || hw->wol_enabled))
		hw->phy.set_phy_power(hw, true);

	return 0;
}

/*
 * Set device link down: power off.
 */
static int
ngbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	if (!(hw->ncsi_enabled || hw->wol_enabled))
		hw->phy.set_phy_power(hw, false);

	return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_pf_reset_hw(hw);

	ngbe_dev_stop(dev);

	/* Let firmware take over control of hardware */
	wr32m(hw, NGBE_PORTCTL, NGBE_PORTCTL_DRVLOAD, 0);

	ngbe_dev_free_queues(dev);

	ngbe_set_pcie_master(hw, false);

	/* reprogram the RAR[0] in case user changed it. */
	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

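	/* Retry the unregister while the callback is still executing
	 * (-EAGAIN), giving up after a bounded number of attempts.
	 */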
	do {
		ret = rte_intr_callback_unregister(intr_handle,
				ngbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + NGBE_LINK_UP_TIME));

	/* uninitialize PF if max_vfs not zero */
	ngbe_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begins to reset a PF port, it should notify all
	 * of its VFs so that they stay aligned with it. The detailed
	 * notification mechanism is PMD-specific. As for the ngbe PF, it is
	 * rather complex. To avoid unexpected behavior in VFs, reset of a PF
	 * with SR-IOV activated is currently not supported. It might be
	 * supported later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_ngbe_dev_uninit(dev);
	if (ret != 0)
		return ret;

	ret = eth_ngbe_dev_init(dev, NULL);

	return ret;
}

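/* Accumulate free-running hardware queue counters: packet counters are
 * 32-bit, byte counters are 36-bit (split across two registers). A wrapped
 * reading is corrected by adding 2^32 (or 2^36) before subtracting the last
 * snapshot; on the first read after a reset the snapshot is seeded.
 */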
#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
	{                                                       \
		uint32_t current_counter = rd32(hw, reg);       \
		if (current_counter < last_counter)             \
			current_counter += 0x100000000LL;       \
		if (!hw->offset_loaded)                         \
			last_counter = current_counter;         \
		counter = current_counter - last_counter;       \
		counter &= 0xFFFFFFFFLL;                        \
	}

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{                                                                \
		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
		uint64_t current_counter = (current_counter_msb << 32) | \
			current_counter_lsb;                             \
		if (current_counter < last_counter)                      \
			current_counter += 0x1000000000LL;               \
		if (!hw->offset_loaded)                                  \
			last_counter = current_counter;                  \
		counter = current_counter - last_counter;                \
		counter &= 0xFFFFFFFFFLL;                                \
	}

void
ngbe_read_stats_registers(struct ngbe_hw *hw,
			   struct ngbe_hw_stats *hw_stats)
{
	unsigned int i;

	/* QP Stats */
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
			hw->qp_last[i].rx_qp_packets,
			hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
			hw->qp_last[i].rx_qp_bytes,
			hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
			hw->qp_last[i].rx_qp_mc_packets,
			hw_stats->qp[i].rx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
			hw->qp_last[i].rx_qp_bc_packets,
			hw_stats->qp[i].rx_qp_bc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
			hw->qp_last[i].tx_qp_packets,
			hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
			hw->qp_last[i].tx_qp_bytes,
			hw_stats->qp[i].tx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
			hw->qp_last[i].tx_qp_mc_packets,
			hw_stats->qp[i].tx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
			hw->qp_last[i].tx_qp_bc_packets,
			hw_stats->qp[i].tx_qp_bc_packets);
	}

	/* PB Stats */
	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

	/* DMA Stats */
	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_dma_drop += rd32(hw, NGBE_DMATXDROP);
	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

	/* MAC Stats */
	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
	hw_stats->rx_oversize_cnt += rd32(hw, NGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

	/* MNG Stats */
	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);

	/* MACsec Stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, NGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, NGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, NGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, NGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, NGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, NGBE_LSECRX_BADPKT(i));
	}
	for (i = 0; i < 4; i++) {
		hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
		hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
	}
	hw_stats->rx_total_missed_packets =
			hw_stats->rx_up_dropped;
}

static int
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct ngbe_stat_mappings *stat_mappings =
			NGBE_DEV_STAT_MAPPINGS(dev);
	struct ngbe_tx_queue *txq;
	uint32_t i, j;

	ngbe_read_stats_registers(hw, hw_stats);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
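	/* Each RQSM/TQSM register packs one 8-bit map field per queue, which
	 * maps a hardware queue to a per-queue stats counter index.
	 */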
	for (i = 0; i < NGBE_MAX_QP; i++) {
		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
		uint32_t q_map;

		q_map = (stat_mappings->rqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

		q_map = (stat_mappings->tqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
	}

	/* Rx Errors */
	stats->imissed  = hw_stats->rx_total_missed_packets +
			  hw_stats->rx_dma_drop;
	stats->ierrors  = hw_stats->rx_crc_errors +
			  hw_stats->rx_mac_short_packet_dropped +
			  hw_stats->rx_length_errors +
			  hw_stats->rx_undersize_errors +
			  hw_stats->rdb_drp_cnt +
			  hw_stats->rx_illegal_byte_errors +
			  hw_stats->rx_error_bytes +
			  hw_stats->rx_fragment_errors;

	/* Tx Errors */
	stats->oerrors  = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		stats->oerrors += txq->desc_error;
	}

	return 0;
}

static int
ngbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct ngbe_tx_queue *txq;
	uint32_t i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->desc_error = 0;
	}

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_dev_stats_get(dev, NULL);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

/* This function calculates the number of xstats based on the current config */
static unsigned
ngbe_xstats_calc_num(struct rte_eth_dev *dev)
{
	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	return NGBE_NB_HW_STATS +
	       NGBE_NB_QP_STATS * nb_queues;
}

static inline int
ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		snprintf(name, size, "[hw]%s",
			rte_ngbe_stats_strings[id].name);
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		snprintf(name, size, "[q%u]%s", nb,
			rte_ngbe_qp_strings[st].name);
		return 0;
	}
	id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;

	return -(int)(id + 1);
}

static inline int
ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		*offset = rte_ngbe_stats_strings[id].offset;
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		*offset = rte_ngbe_qp_strings[st].offset +
			nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
		return 0;
	}

	return -1;
}

static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
{
	unsigned int i, count;

	count = ngbe_xstats_calc_num(dev);
	if (xstats_names == NULL)
		return count;

	/* Note: limit >= cnt_stats checked upstream
	 * in rte_eth_xstats_names()
	 */
	limit = min(limit, count);

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(i, xstats_names[i].name,
			sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
	}

	return i;
}

static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
	const uint64_t *ids,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int limit)
{
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
				sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			return -1;
		}
	}

	return i;
}

static int
ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
					 unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, xstats is NULL and we have cleared the
	 * registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (xstats == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset = 0;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
		xstats[i].id = i;
	}

	return i;
}

static int
ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
					 unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, xstats is NULL and we have cleared the
	 * registers by reading them.
	 */
1745 	if (values == NULL)
1746 		return count;
1747 
1748 	limit = min(limit, ngbe_xstats_calc_num(dev));
1749 
1750 	/* Extended stats from ngbe_hw_stats */
1751 	for (i = 0; i < limit; i++) {
1752 		uint32_t offset;
1753 
1754 		if (ngbe_get_offset_by_id(i, &offset)) {
1755 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1756 			break;
1757 		}
1758 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1759 	}
1760 
1761 	return i;
1762 }
1763 
1764 static int
1765 ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1766 		uint64_t *values, unsigned int limit)
1767 {
1768 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1769 	unsigned int i;
1770 
1771 	if (ids == NULL)
1772 		return ngbe_dev_xstats_get_(dev, values, limit);
1773 
1774 	for (i = 0; i < limit; i++) {
1775 		uint32_t offset;
1776 
1777 		if (ngbe_get_offset_by_id(ids[i], &offset)) {
1778 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1779 			break;
1780 		}
1781 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1782 	}
1783 
1784 	return i;
1785 }
1786 
1787 static int
1788 ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
1789 {
1790 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1791 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1792 
1793 	/* HW registers are cleared on read */
1794 	hw->offset_loaded = 0;
1795 	ngbe_read_stats_registers(hw, hw_stats);
1796 	hw->offset_loaded = 1;
1797 
1798 	/* Reset software totals */
1799 	memset(hw_stats, 0, sizeof(*hw_stats));
1800 
1801 	return 0;
1802 }
1803 
1804 static int
1805 ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1806 {
1807 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1808 	int ret;
1809 
1810 	ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
1811 
1812 	if (ret < 0)
1813 		return -EINVAL;
1814 
1815 	ret += 1; /* add the size of '\0' */
1816 	if (fw_size < (size_t)ret)
1817 		return ret;
1818 
1819 	return 0;
1820 }
1821 
1822 static int
1823 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1824 {
1825 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1826 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1827 
1828 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1829 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1830 	dev_info->min_rx_bufsize = 1024;
1831 	dev_info->max_rx_pktlen = NGBE_MAX_MTU + NGBE_ETH_OVERHEAD;
1832 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
1833 	dev_info->max_mtu = NGBE_MAX_MTU;
1834 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1835 	dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
1836 	dev_info->max_vfs = pci_dev->max_vfs;
1837 	dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1838 	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1839 				     dev_info->rx_queue_offload_capa);
1840 	dev_info->tx_queue_offload_capa = 0;
1841 	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1842 
1843 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1844 		.rx_thresh = {
1845 			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
1846 			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
1847 			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
1848 		},
1849 		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1850 		.rx_drop_en = 0,
1851 		.offloads = 0,
1852 	};
1853 
1854 	dev_info->default_txconf = (struct rte_eth_txconf) {
1855 		.tx_thresh = {
1856 			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
1857 			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
1858 			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
1859 		},
1860 		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1861 		.offloads = 0,
1862 	};
1863 
1864 	dev_info->rx_desc_lim = rx_desc_lim;
1865 	dev_info->tx_desc_lim = tx_desc_lim;
1866 
1867 	dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
1868 	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
1869 	dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;
1870 
1871 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1872 				RTE_ETH_LINK_SPEED_10M;
1873 
1874 	/* Driver-preferred Rx/Tx parameters */
1875 	dev_info->default_rxportconf.burst_size = 32;
1876 	dev_info->default_txportconf.burst_size = 32;
1877 	dev_info->default_rxportconf.nb_queues = 1;
1878 	dev_info->default_txportconf.nb_queues = 1;
1879 	dev_info->default_rxportconf.ring_size = 256;
1880 	dev_info->default_txportconf.ring_size = 256;
1881 
1882 	return 0;
1883 }
1884 
1885 const uint32_t *
1886 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
1887 {
1888 	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1889 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)
1890 	    dev->rx_pkt_burst == ngbe_recv_pkts_vec ||
1891 	    dev->rx_pkt_burst == ngbe_recv_scattered_pkts_vec ||
1892 #endif
1893 	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1894 	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1895 	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1896 		return ngbe_get_supported_ptypes(no_of_elements);
1897 
1898 	return NULL;
1899 }
1900 
1901 static void
1902 ngbe_dev_overheat(struct rte_eth_dev *dev)
1903 {
1904 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1905 	s32 temp_state;
1906 
1907 	temp_state = hw->mac.check_overtemp(hw);
1908 	if (!temp_state)
1909 		return;
1910 
1911 	if (temp_state == NGBE_ERR_UNDERTEMP) {
1912 		PMD_DRV_LOG(CRIT, "Network adapter has been started again, "
1913 			"since the temperature has returned to the normal state.");
1914 		wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, NGBE_PBRXCTL_ENA);
1915 		ngbe_dev_set_link_up(dev);
1916 	} else if (temp_state == NGBE_ERR_OVERTEMP) {
1917 		PMD_DRV_LOG(CRIT, "Network adapter has been stopped because it has overheated.");
1918 		wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0);
1919 		ngbe_dev_set_link_down(dev);
1920 	}
1921 }
1922 
1923 /* return 0 means link status changed, -1 means not changed */
1924 int
1925 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1926 			    int wait_to_complete)
1927 {
1928 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1929 	struct rte_eth_link link;
1930 	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1931 	u32 lan_speed = 0;
1932 	bool link_up;
1933 	int err;
1934 	int wait = 1;
1935 	u32 reg;
1936 
1937 	memset(&link, 0, sizeof(link));
1938 	link.link_status = RTE_ETH_LINK_DOWN;
1939 	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1940 	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1941 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1942 			~RTE_ETH_LINK_SPEED_AUTONEG);
1943 
1944 	hw->mac.get_link_status = true;
1945 
1946 	/* skip waiting for completion if not requested, or if the LSC interrupt is enabled */
1947 	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1948 		wait = 0;
1949 
1950 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1951 	if (err != 0) {
1952 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1953 		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1954 		return rte_eth_linkstatus_set(dev, &link);
1955 	}
1956 
1957 	if (!link_up)
1958 		return rte_eth_linkstatus_set(dev, &link);
1959 
1960 	link.link_status = RTE_ETH_LINK_UP;
1961 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1962 
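	/* lan_speed holds the encoding written to the NGBE_LAN_SPEED field
	 * below for the PF path: 0 = 10M, 1 = 100M, 2 = 1G.
	 */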
1963 	switch (link_speed) {
1964 	default:
1965 	case NGBE_LINK_SPEED_UNKNOWN:
1966 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1967 		break;
1968 
1969 	case NGBE_LINK_SPEED_10M_FULL:
1970 		link.link_speed = RTE_ETH_SPEED_NUM_10M;
1971 		lan_speed = 0;
1972 		break;
1973 
1974 	case NGBE_LINK_SPEED_100M_FULL:
1975 		link.link_speed = RTE_ETH_SPEED_NUM_100M;
1976 		lan_speed = 1;
1977 		break;
1978 
1979 	case NGBE_LINK_SPEED_1GB_FULL:
1980 		link.link_speed = RTE_ETH_SPEED_NUM_1G;
1981 		lan_speed = 2;
1982 		break;
1983 	}
1984 
1985 	if (hw->is_pf) {
1986 		wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1987 		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1988 				NGBE_LINK_SPEED_100M_FULL |
1989 				NGBE_LINK_SPEED_10M_FULL)) {
1990 			wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1991 				NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1992 		}
1993 		/* Reconfigure MAC Rx */
1994 		reg = rd32(hw, NGBE_MACRXCFG);
1995 		wr32(hw, NGBE_MACRXCFG, reg);
1996 		wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_PROMISC,
1997 			NGBE_MACRXFLT_PROMISC);
1998 		reg = rd32(hw, NGBE_MAC_WDG_TIMEOUT);
1999 		wr32(hw, NGBE_MAC_WDG_TIMEOUT, reg);
2000 	}
2001 
2002 	return rte_eth_linkstatus_set(dev, &link);
2003 }
2004 
2005 static int
2006 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2007 {
2008 	return ngbe_dev_link_update_share(dev, wait_to_complete);
2009 }
2010 
2011 static int
2012 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2013 {
2014 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2015 	uint32_t fctrl;
2016 
2017 	fctrl = rd32(hw, NGBE_PSRCTL);
2018 	fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
2019 	wr32(hw, NGBE_PSRCTL, fctrl);
2020 
2021 	return 0;
2022 }
2023 
2024 static int
2025 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2026 {
2027 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2028 	uint32_t fctrl;
2029 
2030 	fctrl = rd32(hw, NGBE_PSRCTL);
2031 	fctrl &= (~NGBE_PSRCTL_UCP);
2032 	if (dev->data->all_multicast == 1)
2033 		fctrl |= NGBE_PSRCTL_MCP;
2034 	else
2035 		fctrl &= (~NGBE_PSRCTL_MCP);
2036 	wr32(hw, NGBE_PSRCTL, fctrl);
2037 
2038 	return 0;
2039 }
2040 
2041 static int
2042 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2043 {
2044 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2045 	uint32_t fctrl;
2046 
2047 	fctrl = rd32(hw, NGBE_PSRCTL);
2048 	fctrl |= NGBE_PSRCTL_MCP;
2049 	wr32(hw, NGBE_PSRCTL, fctrl);
2050 
2051 	return 0;
2052 }
2053 
2054 static int
2055 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2056 {
2057 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2058 	uint32_t fctrl;
2059 
2060 	if (dev->data->promiscuous == 1)
2061 		return 0; /* must remain in all_multicast mode */
2062 
2063 	fctrl = rd32(hw, NGBE_PSRCTL);
2064 	fctrl &= (~NGBE_PSRCTL_MCP);
2065 	wr32(hw, NGBE_PSRCTL, fctrl);
2066 
2067 	return 0;
2068 }
2069 
2070 /**
2071  * It clears the interrupt causes and enables the interrupt.
2072  * It will be called only once during NIC initialization.
2073  *
2074  * @param dev
2075  *  Pointer to struct rte_eth_dev.
2076  * @param on
2077  *  Enable or Disable.
2078  *
2079  * @return
2080  *  - On success, zero.
2081  *  - On failure, a negative value.
2082  */
2083 static int
2084 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2085 {
2086 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2087 
2088 	ngbe_dev_link_status_print(dev);
2089 	if (on != 0) {
2090 		intr->mask_misc |= NGBE_ICRMISC_PHY;
2091 		intr->mask_misc |= NGBE_ICRMISC_GPIO;
2092 	} else {
2093 		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2094 		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
2095 	}
2096 
2097 	return 0;
2098 }
2099 
2100 /**
2101  * It clears the interrupt causes and enables the interrupt.
2102  * It will be called only once during NIC initialization.
2103  *
2104  * @param dev
2105  *  Pointer to struct rte_eth_dev.
2106  *
2107  * @return
2108  *  - On success, zero.
2109  *  - On failure, a negative value.
2110  */
2111 static int
2112 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
2113 {
2114 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2115 	u64 mask;
2116 
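	/* keep only the interrupt bit that belongs to the misc vector */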
2117 	mask = NGBE_ICR_MASK;
2118 	mask &= (1ULL << NGBE_MISC_VEC_ID);
2119 	intr->mask |= mask;
2120 	intr->mask_misc |= NGBE_ICRMISC_GPIO;
2121 
2122 	return 0;
2123 }
2124 
2125 /**
2126  * It clears the interrupt causes and enables the interrupt.
2127  * It will be called only once during NIC initialization.
2128  *
2129  * @param dev
2130  *  Pointer to struct rte_eth_dev.
2131  *
2132  * @return
2133  *  - On success, zero.
2134  *  - On failure, a negative value.
2135  */
2136 static int
2137 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2138 {
2139 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2140 	u64 mask;
2141 
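	/* mask off the low bits reserved for misc causes, keeping only
	 * the bits that correspond to Rx queue vectors
	 */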
2142 	mask = NGBE_ICR_MASK;
2143 	mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
2144 	intr->mask |= mask;
2145 
2146 	return 0;
2147 }
2148 
2149 /**
2150  * It clears the interrupt causes and enables the interrupt.
2151  * It will be called only once during NIC initialization.
2152  *
2153  * @param dev
2154  *  Pointer to struct rte_eth_dev.
2155  *
2156  * @return
2157  *  - On success, zero.
2158  *  - On failure, a negative value.
2159  */
2160 static int
2161 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2162 {
2163 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2164 
2165 	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
2166 
2167 	return 0;
2168 }
2169 
2170 /*
2171  * It reads the ICR and sets flags for the link_update.
2172  *
2173  * @param dev
2174  *  Pointer to struct rte_eth_dev.
2175  *
2176  * @return
2177  *  - On success, zero.
2178  *  - On failure, a negative value.
2179  */
2180 static int
2181 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2182 {
2183 	uint32_t eicr;
2184 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2185 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2186 
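	/* Interrupt causes are mirrored into the in-memory interrupt status
	 * block (hw->isb_mem); read the vector-0 causes from there instead
	 * of polling a device register.
	 */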
2187 	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_VEC0];
2188 	if (!eicr) {
2189 		/*
2190 		 * shared interrupt alert!
2191 		 * make sure interrupts are enabled because the read will
2192 		 * have disabled interrupts.
2193 		 */
2194 		if (!hw->adapter_stopped)
2195 			ngbe_enable_intr(dev);
2196 		return 0;
2197 	}
2198 	((u32 *)hw->isb_mem)[NGBE_ISB_VEC0] = 0;
2199 
2200 	/* read-on-clear nic registers here */
2201 	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2202 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2203 
2204 	intr->flags = 0;
2205 
2206 	/* set flag for async link update */
2207 	if (eicr & NGBE_ICRMISC_PHY)
2208 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2209 
2210 	if (eicr & NGBE_ICRMISC_VFMBX)
2211 		intr->flags |= NGBE_FLAG_MAILBOX;
2212 
2213 	if (eicr & NGBE_ICRMISC_LNKSEC)
2214 		intr->flags |= NGBE_FLAG_MACSEC;
2215 
2216 	if (eicr & NGBE_ICRMISC_GPIO)
2217 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2218 
2219 	if (eicr & NGBE_ICRMISC_HEAT)
2220 		intr->flags |= NGBE_FLAG_OVERHEAT;
2221 
2222 	((u32 *)hw->isb_mem)[NGBE_ISB_MISC] = 0;
2223 
2224 	return 0;
2225 }
2226 
2227 /**
2228  * It gets and then prints the link status.
2229  *
2230  * @param dev
2231  *  Pointer to struct rte_eth_dev.
2236  */
2237 static void
2238 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
2239 {
2240 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2241 	struct rte_eth_link link;
2242 
2243 	rte_eth_linkstatus_get(dev, &link);
2244 
2245 	if (link.link_status == RTE_ETH_LINK_UP) {
2246 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2247 					(int)(dev->data->port_id),
2248 					(unsigned int)link.link_speed,
2249 			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2250 					"full-duplex" : "half-duplex");
2251 	} else {
2252 		PMD_INIT_LOG(INFO, "Port %d: Link Down",
2253 				(int)(dev->data->port_id));
2254 	}
2255 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2256 				pci_dev->addr.domain,
2257 				pci_dev->addr.bus,
2258 				pci_dev->addr.devid,
2259 				pci_dev->addr.function);
2260 }
2261 
2262 /*
2263  * It executes link_update after an interrupt has occurred.
2264  *
2265  * @param dev
2266  *  Pointer to struct rte_eth_dev.
2267  *
2268  * @return
2269  *  - On success, zero.
2270  *  - On failure, a negative value.
2271  */
2272 static int
2273 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2274 {
2275 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2276 
2277 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2278 
2279 	if (intr->flags & NGBE_FLAG_MAILBOX) {
2280 		ngbe_pf_mbx_process(dev);
2281 		intr->flags &= ~NGBE_FLAG_MAILBOX;
2282 	}
2283 
2284 	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2285 		struct rte_eth_link link;
2286 
2287 		/* get the link status before the update, to detect a speed change later */
2288 		rte_eth_linkstatus_get(dev, &link);
2289 
2290 		ngbe_dev_link_update(dev, 0);
2291 		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2292 		ngbe_dev_link_status_print(dev);
2293 		if (dev->data->dev_link.link_speed != link.link_speed)
2294 			rte_eth_dev_callback_process(dev,
2295 				RTE_ETH_EVENT_INTR_LSC, NULL);
2296 	}
2297 
2298 	if (intr->flags & NGBE_FLAG_OVERHEAT) {
2299 		ngbe_dev_overheat(dev);
2300 		intr->flags &= ~NGBE_FLAG_OVERHEAT;
2301 	}
2302 
2303 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
2304 	ngbe_enable_intr(dev);
2305 
2306 	return 0;
2307 }
2308 
2309 /**
2310  * Interrupt handler triggered by the NIC for handling a
2311  * specific interrupt.
2312  *
2313  * @param param
2314  *  The address of parameter (struct rte_eth_dev *) registered before.
2315  */
2316 static void
2317 ngbe_dev_interrupt_handler(void *param)
2318 {
2319 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2320 
2321 	ngbe_dev_interrupt_get_status(dev);
2322 	ngbe_dev_interrupt_action(dev);
2323 }
2324 
2325 static int
2326 ngbe_dev_led_on(struct rte_eth_dev *dev)
2327 {
2328 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2329 	return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP;
2330 }
2331 
2332 static int
2333 ngbe_dev_led_off(struct rte_eth_dev *dev)
2334 {
2335 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2336 	return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP;
2337 }
2338 
2339 static int
2340 ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2341 {
2342 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2343 	uint32_t mflcn_reg;
2344 	uint32_t fccfg_reg;
2345 	int rx_pause;
2346 	int tx_pause;
2347 
2348 	fc_conf->pause_time = hw->fc.pause_time;
2349 	fc_conf->high_water = hw->fc.high_water;
2350 	fc_conf->low_water = hw->fc.low_water;
2351 	fc_conf->send_xon = hw->fc.send_xon;
2352 	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2353 
2354 	/*
2355 	 * Return rx_pause status according to actual setting of
2356 	 * RXFCCFG register.
2357 	 */
2358 	mflcn_reg = rd32(hw, NGBE_RXFCCFG);
2359 	if (mflcn_reg & NGBE_RXFCCFG_FC)
2360 		rx_pause = 1;
2361 	else
2362 		rx_pause = 0;
2363 
2364 	/*
2365 	 * Return tx_pause status according to actual setting of
2366 	 * TXFCCFG register.
2367 	 */
2368 	fccfg_reg = rd32(hw, NGBE_TXFCCFG);
2369 	if (fccfg_reg & NGBE_TXFCCFG_FC)
2370 		tx_pause = 1;
2371 	else
2372 		tx_pause = 0;
2373 
2374 	if (rx_pause && tx_pause)
2375 		fc_conf->mode = RTE_ETH_FC_FULL;
2376 	else if (rx_pause)
2377 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2378 	else if (tx_pause)
2379 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2380 	else
2381 		fc_conf->mode = RTE_ETH_FC_NONE;
2382 
2383 	return 0;
2384 }
2385 
2386 static int
2387 ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2388 {
2389 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2390 	int err;
2391 	uint32_t rx_buf_size;
2392 	uint32_t max_high_water;
2393 	enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = {
2394 		ngbe_fc_none,
2395 		ngbe_fc_rx_pause,
2396 		ngbe_fc_tx_pause,
2397 		ngbe_fc_full
2398 	};
2399 
2400 	PMD_INIT_FUNC_TRACE();
2401 
2402 	rx_buf_size = rd32(hw, NGBE_PBRXSIZE);
2403 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2404 
2405 	/*
2406 	 * At least reserve one Ethernet frame for the watermark;
2407 	 * high_water/low_water are in kilobytes for ngbe.
2408 	 */
2409 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
2410 	if (fc_conf->high_water > max_high_water ||
2411 	    fc_conf->high_water < fc_conf->low_water) {
2412 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2413 		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
2414 		return -EINVAL;
2415 	}
2416 
2417 	hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode];
2418 	hw->fc.pause_time     = fc_conf->pause_time;
2419 	hw->fc.high_water     = fc_conf->high_water;
2420 	hw->fc.low_water      = fc_conf->low_water;
2421 	hw->fc.send_xon       = fc_conf->send_xon;
2422 	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
2423 
2424 	err = hw->mac.fc_enable(hw);
2425 
2426 	/* Not negotiated is not an error case */
2427 	if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) {
2428 		wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK,
2429 		      (fc_conf->mac_ctrl_frame_fwd
2430 		       ? NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP));
2431 		ngbe_flush(hw);
2432 
2433 		return 0;
2434 	}
2435 
2436 	PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err);
2437 	return -EIO;
2438 }
2439 
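/*
 * Example (hypothetical values): applications normally reach this callback
 * through the generic ethdev API rather than calling it directly, e.g.:
 *
 *     struct rte_eth_fc_conf fc = {
 *             .mode = RTE_ETH_FC_FULL,
 *             .pause_time = 0x680,
 *             .high_water = 0x50,
 *             .low_water = 0x20,
 *             .autoneg = 1,
 *     };
 *     ret = rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */
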
2440 /* Additional bittime to account for NGBE framing */
2441 #define NGBE_ETH_FRAMING 20
2442 
2443 /*
2444  * ngbe_fc_hpbthresh_set - calculate high water mark for flow control
2445  *
2446  * @dev: pointer to the rte_eth_dev structure
2448  */
2449 static s32
2450 ngbe_fc_hpbthresh_set(struct rte_eth_dev *dev)
2451 {
2452 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2453 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2454 	u32 max_frame_size, tc, dv_id, rx_pb;
2455 	s32 kb, marker;
2456 
2457 	/* Calculate max LAN frame size */
2458 	max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK);
2459 	tc = max_frame_size + NGBE_ETH_FRAMING;
2460 
2461 	/* Calculate delay value for device */
2462 	dv_id = NGBE_DV(tc, tc);
2463 
2464 	/* Loopback switch introduces additional latency */
2465 	if (pci_dev->max_vfs)
2466 		dv_id += NGBE_B2BT(tc);
2467 
2468 	/* Delay value is calculated in bit times; convert it to KB */
2469 	kb = NGBE_BT2KB(dv_id);
2470 	rx_pb = rd32(hw, NGBE_PBRXSIZE) >> 10;
2471 
2472 	marker = rx_pb - kb;
2473 
2474 	/* It is possible that the packet buffer is not large enough
2475 	 * to provide the required headroom. In this case warn the
2476 	 * user and do the best we can.
2477 	 */
2478 	if (marker < 0) {
2479 		PMD_DRV_LOG(WARNING, "Packet Buffer can not provide enough headroom to support flow control.");
2480 		marker = tc + 1;
2481 	}
2482 
2483 	return marker;
2484 }
2485 
2486 /*
2487  * ngbe_fc_lpbthresh_set - calculate low water mark for flow control
2488  *
2489  * @dev: pointer to the rte_eth_dev structure
2490  */
2491 static s32
2492 ngbe_fc_lpbthresh_set(struct rte_eth_dev *dev)
2493 {
2494 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2495 	u32 max_frame_size, tc, dv_id;
2496 	s32 kb;
2497 
2498 	/* Calculate max LAN frame size */
2499 	max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK);
2500 	tc = max_frame_size + NGBE_ETH_FRAMING;
2501 
2502 	/* Calculate delay value for device */
2503 	dv_id = NGBE_LOW_DV(tc);
2504 
2505 	/* Delay value is calculated in bit times; convert it to KB */
2506 	kb = NGBE_BT2KB(dv_id);
2507 
2508 	return kb;
2509 }
2510 
2511 /*
2512  * ngbe_pbthresh_set - calculate and set up high and low water marks
2513  */
2514 static void
2515 ngbe_pbthresh_set(struct rte_eth_dev *dev)
2516 {
2517 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2518 
2519 	hw->fc.high_water = ngbe_fc_hpbthresh_set(dev);
2520 	hw->fc.low_water = ngbe_fc_lpbthresh_set(dev);
2521 
2522 	/* Low water marks must not be larger than high water marks */
2523 	if (hw->fc.low_water > hw->fc.high_water)
2524 		hw->fc.low_water = 0;
2525 }
2526 
2527 int
2528 ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2529 			  struct rte_eth_rss_reta_entry64 *reta_conf,
2530 			  uint16_t reta_size)
2531 {
2532 	uint8_t i, j, mask;
2533 	uint32_t reta;
2534 	uint16_t idx, shift;
2535 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2536 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2537 
2538 	PMD_INIT_FUNC_TRACE();
2539 
2540 	if (!hw->is_pf) {
2541 		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
2542 			"NIC.");
2543 		return -ENOTSUP;
2544 	}
2545 
2546 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2547 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2548 			"(%d) doesn't match the number hardware can support "
2549 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2550 		return -EINVAL;
2551 	}
2552 
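	/* Each 32-bit RSSTBL register packs four 8-bit queue indices, so
	 * the loop advances four entries at a time and takes a 4-bit slice
	 * of the group mask per register.
	 */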
2553 	for (i = 0; i < reta_size; i += 4) {
2554 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2555 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2556 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2557 		if (!mask)
2558 			continue;
2559 
2560 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2561 		for (j = 0; j < 4; j++) {
2562 			if (RS8(mask, j, 0x1)) {
2563 				reta  &= ~(MS32(8 * j, 0xFF));
2564 				reta |= LS32(reta_conf[idx].reta[shift + j],
2565 						8 * j, 0xFF);
2566 			}
2567 		}
2568 		wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2569 	}
2570 	adapter->rss_reta_updated = 1;
2571 
2572 	return 0;
2573 }
2574 
2575 int
2576 ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2577 			 struct rte_eth_rss_reta_entry64 *reta_conf,
2578 			 uint16_t reta_size)
2579 {
2580 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2581 	uint8_t i, j, mask;
2582 	uint32_t reta;
2583 	uint16_t idx, shift;
2584 
2585 	PMD_INIT_FUNC_TRACE();
2586 
2587 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2588 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2589 			"(%d) doesn't match the number hardware can support "
2590 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2591 		return -EINVAL;
2592 	}
2593 
2594 	for (i = 0; i < reta_size; i += 4) {
2595 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2596 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2597 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2598 		if (!mask)
2599 			continue;
2600 
2601 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2602 		for (j = 0; j < 4; j++) {
2603 			if (RS8(mask, j, 0x1))
2604 				reta_conf[idx].reta[shift + j] =
2605 					(uint16_t)RS32(reta, 8 * j, 0xFF);
2606 		}
2607 	}
2608 
2609 	return 0;
2610 }
2611 
2612 static int
2613 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2614 				uint32_t index, uint32_t pool)
2615 {
2616 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2617 	uint32_t enable_addr = 1;
2618 
2619 	return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2620 			     pool, enable_addr);
2621 }
2622 
2623 static void
2624 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2625 {
2626 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2627 
2628 	ngbe_clear_rar(hw, index);
2629 }
2630 
2631 static int
2632 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2633 {
2634 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2635 
2636 	ngbe_remove_rar(dev, 0);
2637 	ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2638 
2639 	return 0;
2640 }
2641 
2642 static int
2643 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2644 {
2645 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2646 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
2647 	struct rte_eth_dev_data *dev_data = dev->data;
2648 
2649 	/* If device is started, refuse mtu that requires the support of
2650 	 * scattered packets when this feature has not been enabled before.
2651 	 */
2652 	if (dev_data->dev_started && !dev_data->scattered_rx &&
2653 	    (frame_size + 2 * RTE_VLAN_HLEN >
2654 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2655 		PMD_INIT_LOG(ERR, "Stop port first.");
2656 		return -EINVAL;
2657 	}
2658 
2659 	wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2660 		NGBE_FRMSZ_MAX(frame_size));
2661 
2662 	return 0;
2663 }
2664 
2665 static uint32_t
2666 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2667 {
2668 	uint32_t vector = 0;
2669 
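	/* The 12-bit hash is taken from the upper bytes of the 48-bit MAC
	 * address; mc_filter_type selects which 12-bit window within
	 * bits [47:32] is used, as noted in each case below.
	 */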
2670 	switch (hw->mac.mc_filter_type) {
2671 	case 0:   /* use bits [47:36] of the address */
2672 		vector = ((uc_addr->addr_bytes[4] >> 4) |
2673 			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
2674 		break;
2675 	case 1:   /* use bits [46:35] of the address */
2676 		vector = ((uc_addr->addr_bytes[4] >> 3) |
2677 			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
2678 		break;
2679 	case 2:   /* use bits [45:34] of the address */
2680 		vector = ((uc_addr->addr_bytes[4] >> 2) |
2681 			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
2682 		break;
2683 	case 3:   /* use bits [43:32] of the address */
2684 		vector = ((uc_addr->addr_bytes[4]) |
2685 			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
2686 		break;
2687 	default:  /* Invalid mc_filter_type */
2688 		break;
2689 	}
2690 
2691 	/* vector can only be 12 bits wide or the boundary will be exceeded */
2692 	vector &= 0xFFF;
2693 	return vector;
2694 }
2695 
2696 static int
2697 ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2698 			struct rte_ether_addr *mac_addr, uint8_t on)
2699 {
2700 	uint32_t vector;
2701 	uint32_t uta_idx;
2702 	uint32_t reg_val;
2703 	uint32_t uta_mask;
2704 	uint32_t psrctl;
2705 
2706 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2707 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2708 
2709 	vector = ngbe_uta_vector(hw, mac_addr);
2710 	uta_idx = (vector >> 5) & 0x7F;
2711 	uta_mask = 0x1UL << (vector & 0x1F);
2712 
2713 	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2714 		return 0;
2715 
2716 	reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2717 	if (on) {
2718 		uta_info->uta_in_use++;
2719 		reg_val |= uta_mask;
2720 		uta_info->uta_shadow[uta_idx] |= uta_mask;
2721 	} else {
2722 		uta_info->uta_in_use--;
2723 		reg_val &= ~uta_mask;
2724 		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2725 	}
2726 
2727 	wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2728 
2729 	psrctl = rd32(hw, NGBE_PSRCTL);
2730 	if (uta_info->uta_in_use > 0)
2731 		psrctl |= NGBE_PSRCTL_UCHFENA;
2732 	else
2733 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2734 
2735 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2736 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2737 	wr32(hw, NGBE_PSRCTL, psrctl);
2738 
2739 	return 0;
2740 }
2741 
2742 static int
2743 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2744 {
2745 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2746 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2747 	uint32_t psrctl;
2748 	int i;
2749 
2750 	if (on) {
2751 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2752 			uta_info->uta_shadow[i] = ~0;
2753 			wr32(hw, NGBE_UCADDRTBL(i), ~0);
2754 		}
2755 	} else {
2756 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2757 			uta_info->uta_shadow[i] = 0;
2758 			wr32(hw, NGBE_UCADDRTBL(i), 0);
2759 		}
2760 	}
2761 
2762 	psrctl = rd32(hw, NGBE_PSRCTL);
2763 	if (on)
2764 		psrctl |= NGBE_PSRCTL_UCHFENA;
2765 	else
2766 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2767 
2768 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2769 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2770 	wr32(hw, NGBE_PSRCTL, psrctl);
2771 
2772 	return 0;
2773 }
2774 
2775 static int
2776 ngbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2777 {
2778 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2779 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2780 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2781 	uint32_t mask;
2782 
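	/* Writing a queue's bit to NGBE_IMC clears its interrupt mask
	 * (enabling the interrupt); the disable path below uses NGBE_IMS
	 * to set the mask bit again.
	 */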
2783 	mask = rd32(hw, NGBE_IMC(0));
2784 	mask |= (1 << queue_id);
2785 	wr32(hw, NGBE_IMC(0), mask);
2786 	rte_intr_enable(intr_handle);
2787 
2788 	return 0;
2789 }
2790 
2791 static int
2792 ngbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2793 {
2794 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2795 	uint32_t mask;
2796 
2797 	mask = rd32(hw, NGBE_IMS(0));
2798 	mask |= (1 << queue_id);
2799 	wr32(hw, NGBE_IMS(0), mask);
2800 
2801 	return 0;
2802 }
2803 
2804 /**
2805  * Set the IVAR registers, mapping interrupt causes to vectors
2806  * @param hw
2807  *  pointer to ngbe_hw struct
2808  * @param direction
2809  *  0 for Rx, 1 for Tx, -1 for other causes
2810  * @param queue
2811  *  queue to map the corresponding interrupt to
2812  * @param msix_vector
2813  *  the vector to map to the corresponding queue
2814  */
2815 void
2816 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2817 		   uint8_t queue, uint8_t msix_vector)
2818 {
2819 	uint32_t tmp, idx;
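	/* Each 32-bit IVAR register holds four 8-bit entries covering two
	 * queues, one Rx and one Tx entry per queue; idx selects the byte
	 * lane for this queue/direction pair.
	 */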
2820 
2821 	if (direction == -1) {
2822 		/* other causes */
2823 		msix_vector |= NGBE_IVARMISC_VLD;
2824 		idx = 0;
2825 		tmp = rd32(hw, NGBE_IVARMISC);
2826 		tmp &= ~(0xFF << idx);
2827 		tmp |= (msix_vector << idx);
2828 		wr32(hw, NGBE_IVARMISC, tmp);
2829 	} else {
2830 		/* rx or tx causes */
2831 		msix_vector |= NGBE_IVAR_VLD; /* Workaround for ICR lost */
2832 		idx = ((16 * (queue & 1)) + (8 * direction));
2833 		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2834 		tmp &= ~(0xFF << idx);
2835 		tmp |= (msix_vector << idx);
2836 		wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2837 	}
2838 }
2839 
2840 /**
2841  * Sets up the hardware to properly generate MSI-X interrupts
2842  * @param dev
2843  *  pointer to the rte_eth_dev structure
2844  */
2845 static void
2846 ngbe_configure_msix(struct rte_eth_dev *dev)
2847 {
2848 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2849 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2850 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2851 	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2852 	uint32_t vec = NGBE_MISC_VEC_ID;
2853 	uint32_t gpie;
2854 
2855 	/*
2856 	 * Won't configure the MSI-X register if no mapping is done
2857 	 * between intr vector and event fd; but if MSI-X has been
2858 	 * enabled already, we need to configure auto clean, auto mask
2859 	 * and throttling.
2860 	 */
2861 	gpie = rd32(hw, NGBE_GPIE);
2862 	if (!rte_intr_dp_is_en(intr_handle) &&
2863 	    !(gpie & NGBE_GPIE_MSIX))
2864 		return;
2865 
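	/* When extra vectors beyond the misc one are available, queue
	 * interrupts start at NGBE_RX_VEC_START so that vector 0 stays
	 * dedicated to link and other misc causes.
	 */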
2866 	if (rte_intr_allow_others(intr_handle)) {
2867 		base = NGBE_RX_VEC_START;
2868 		vec = base;
2869 	}
2870 
2871 	/* setup GPIE for MSI-X mode */
2872 	gpie = rd32(hw, NGBE_GPIE);
2873 	gpie |= NGBE_GPIE_MSIX;
2874 	wr32(hw, NGBE_GPIE, gpie);
2875 
2876 	/* Populate the IVAR table and set the ITR values to the
2877 	 * corresponding register.
2878 	 */
2879 	if (rte_intr_dp_is_en(intr_handle)) {
2880 		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2881 			queue_id++) {
2882 			/* by default, 1:1 mapping */
2883 			ngbe_set_ivar_map(hw, 0, queue_id, vec);
2884 			rte_intr_vec_list_index_set(intr_handle,
2885 							   queue_id, vec);
2886 			if (vec < base + rte_intr_nb_efd_get(intr_handle)
2887 			    - 1)
2888 				vec++;
2889 		}
2890 
2891 		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2892 	}
2893 	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2894 			NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2895 			| NGBE_ITR_WRDSA);
2896 }
2897 
2898 static u8 *
2899 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2900 			u8 **mc_addr_ptr, u32 *vmdq)
2901 {
2902 	u8 *mc_addr;
2903 
2904 	*vmdq = 0;
2905 	mc_addr = *mc_addr_ptr;
2906 	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2907 	return mc_addr;
2908 }
2909 
2910 int
2911 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2912 			  struct rte_ether_addr *mc_addr_set,
2913 			  uint32_t nb_mc_addr)
2914 {
2915 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2916 	u8 *mc_addr_list;
2917 
2918 	mc_addr_list = (u8 *)mc_addr_set;
2919 	return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2920 					 ngbe_dev_addr_list_itr, TRUE);
2921 }
2922 
2923 static uint64_t
2924 ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
2925 {
2926 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2927 	uint64_t systime_cycles;
2928 
2929 	systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL);
2930 	systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32;
2931 
2932 	return systime_cycles;
2933 }
2934 
2935 static uint64_t
2936 ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2937 {
2938 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2939 	uint64_t rx_tstamp_cycles;
2940 
2941 	/* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
2942 	rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL);
2943 	rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32;
2944 
2945 	return rx_tstamp_cycles;
2946 }
2947 
2948 static uint64_t
2949 ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2950 {
2951 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2952 	uint64_t tx_tstamp_cycles;
2953 
2954 	/* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
2955 	tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL);
2956 	tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32;
2957 
2958 	return tx_tstamp_cycles;
2959 }
2960 
2961 static void
2962 ngbe_start_timecounters(struct rte_eth_dev *dev)
2963 {
2964 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2965 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2966 	uint32_t incval = 0;
2967 	uint32_t shift = 0;
2968 
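	/* Only 10/100/1000M speeds are supported (see ngbe_dev_info_get),
	 * so a single 1G increment value is used for the cycle counter
	 * regardless of the negotiated link speed.
	 */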
2969 	incval = NGBE_INCVAL_1GB;
2970 	shift = NGBE_INCVAL_SHIFT_1GB;
2971 
2972 	wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval));
2973 
2974 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
2975 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2976 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2977 
2978 	adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2979 	adapter->systime_tc.cc_shift = shift;
2980 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
2981 
2982 	adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2983 	adapter->rx_tstamp_tc.cc_shift = shift;
2984 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2985 
2986 	adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2987 	adapter->tx_tstamp_tc.cc_shift = shift;
2988 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2989 }
2990 
2991 static int
2992 ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
2993 {
2994 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2995 
2996 	adapter->systime_tc.nsec += delta;
2997 	adapter->rx_tstamp_tc.nsec += delta;
2998 	adapter->tx_tstamp_tc.nsec += delta;
2999 
3000 	return 0;
3001 }
3002 
3003 static int
3004 ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
3005 {
3006 	uint64_t ns;
3007 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
3008 
3009 	ns = rte_timespec_to_ns(ts);
3010 	/* Set the timecounters to a new value. */
3011 	adapter->systime_tc.nsec = ns;
3012 	adapter->rx_tstamp_tc.nsec = ns;
3013 	adapter->tx_tstamp_tc.nsec = ns;
3014 
3015 	return 0;
3016 }
3017 
3018 static int
3019 ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
3020 {
3021 	uint64_t ns, systime_cycles;
3022 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
3023 
3024 	systime_cycles = ngbe_read_systime_cyclecounter(dev);
3025 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
3026 	*ts = rte_ns_to_timespec(ns);
3027 
3028 	return 0;
3029 }
3030 
3031 static int
3032 ngbe_timesync_enable(struct rte_eth_dev *dev)
3033 {
3034 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3035 	uint32_t tsync_ctl;
3036 
3037 	/* Stop the timesync system time. */
3038 	wr32(hw, NGBE_TSTIMEINC, 0x0);
3039 	/* Reset the timesync system time value. */
3040 	wr32(hw, NGBE_TSTIMEL, 0x0);
3041 	wr32(hw, NGBE_TSTIMEH, 0x0);
3042 
3043 	ngbe_start_timecounters(dev);
3044 
3045 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3046 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588),
3047 		RTE_ETHER_TYPE_1588 | NGBE_ETFLT_ENA | NGBE_ETFLT_1588);
3048 
3049 	/* Enable timestamping of received PTP packets. */
3050 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
3051 	tsync_ctl |= NGBE_TSRXCTL_ENA;
3052 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
3053 
3054 	/* Enable timestamping of transmitted PTP packets. */
3055 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
3056 	tsync_ctl |= NGBE_TSTXCTL_ENA;
3057 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
3058 
3059 	ngbe_flush(hw);
3060 
3061 	return 0;
3062 }
3063 
3064 static int
3065 ngbe_timesync_disable(struct rte_eth_dev *dev)
3066 {
3067 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3068 	uint32_t tsync_ctl;
3069 
3070 	/* Disable timestamping of transmitted PTP packets. */
3071 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
3072 	tsync_ctl &= ~NGBE_TSTXCTL_ENA;
3073 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
3074 
3075 	/* Disable timestamping of received PTP packets. */
3076 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
3077 	tsync_ctl &= ~NGBE_TSRXCTL_ENA;
3078 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
3079 
3080 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3081 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);
3082 
3083 	/* Stop incrementing the System Time registers. */
3084 	wr32(hw, NGBE_TSTIMEINC, 0);
3085 
3086 	return 0;
3087 }
3088 
3089 static int
3090 ngbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3091 				 struct timespec *timestamp,
3092 				 uint32_t flags __rte_unused)
3093 {
3094 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3095 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
3096 	uint32_t tsync_rxctl;
3097 	uint64_t rx_tstamp_cycles;
3098 	uint64_t ns;
3099 
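	/* NGBE_TSRXCTL_VLD indicates that an Rx timestamp has been latched;
	 * bail out if nothing has been captured yet.
	 */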
3100 	tsync_rxctl = rd32(hw, NGBE_TSRXCTL);
3101 	if ((tsync_rxctl & NGBE_TSRXCTL_VLD) == 0)
3102 		return -EINVAL;
3103 
3104 	rx_tstamp_cycles = ngbe_read_rx_tstamp_cyclecounter(dev);
3105 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
3106 	*timestamp = rte_ns_to_timespec(ns);
3107 
3108 	return  0;
3109 }
3110 
3111 static int
3112 ngbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3113 				 struct timespec *timestamp)
3114 {
3115 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3116 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
3117 	uint32_t tsync_txctl;
3118 	uint64_t tx_tstamp_cycles;
3119 	uint64_t ns;
3120 
3121 	tsync_txctl = rd32(hw, NGBE_TSTXCTL);
3122 	if ((tsync_txctl & NGBE_TSTXCTL_VLD) == 0)
3123 		return -EINVAL;
3124 
3125 	tx_tstamp_cycles = ngbe_read_tx_tstamp_cyclecounter(dev);
3126 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
3127 	*timestamp = rte_ns_to_timespec(ns);
3128 
3129 	return 0;
3130 }
3131 
3132 static int
3133 ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
3134 {
3135 	int count = 0;
3136 	int g_ind = 0;
3137 	const struct reg_info *reg_group;
3138 	const struct reg_info **reg_set = ngbe_regs_others;
3139 
3140 	while ((reg_group = reg_set[g_ind++]))
3141 		count += ngbe_regs_group_count(reg_group);
3142 
3143 	return count;
3144 }
3145 
3146 static int
3147 ngbe_get_regs(struct rte_eth_dev *dev,
3148 	      struct rte_dev_reg_info *regs)
3149 {
3150 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3151 	uint32_t *data = regs->data;
3152 	int g_ind = 0;
3153 	int count = 0;
3154 	const struct reg_info *reg_group;
3155 	const struct reg_info **reg_set = ngbe_regs_others;
3156 
3157 	if (data == NULL) {
3158 		regs->length = ngbe_get_reg_length(dev);
3159 		regs->width = sizeof(uint32_t);
3160 		return 0;
3161 	}
3162 
3163 	/* Support only full register dump */
3164 	if (regs->length == 0 ||
3165 	    regs->length == (uint32_t)ngbe_get_reg_length(dev)) {
3166 		regs->version = hw->mac.type << 24 |
3167 				hw->revision_id << 16 |
3168 				hw->device_id;
3169 		while ((reg_group = reg_set[g_ind++]))
3170 			count += ngbe_read_regs_group(dev, &data[count],
3171 						      reg_group);
3172 		return 0;
3173 	}
3174 
3175 	return -ENOTSUP;
3176 }
3177 
3178 static int
3179 ngbe_get_eeprom_length(struct rte_eth_dev *dev)
3180 {
3181 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3182 
3183 	/* Return unit is byte count */
3184 	return hw->rom.word_size * 2;
3185 }
3186 
3187 static int
3188 ngbe_get_eeprom(struct rte_eth_dev *dev,
3189 		struct rte_dev_eeprom_info *in_eeprom)
3190 {
3191 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3192 	struct ngbe_rom_info *eeprom = &hw->rom;
3193 	uint16_t *data = in_eeprom->data;
3194 	int first, length;
3195 
3196 	first = in_eeprom->offset >> 1;
3197 	length = in_eeprom->length >> 1;
3198 	if (first > hw->rom.word_size ||
3199 	    ((first + length) > hw->rom.word_size))
3200 		return -EINVAL;
3201 
3202 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3203 
3204 	return eeprom->readw_buffer(hw, first, length, data);
3205 }
3206 
3207 static int
3208 ngbe_set_eeprom(struct rte_eth_dev *dev,
3209 		struct rte_dev_eeprom_info *in_eeprom)
3210 {
3211 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3212 	struct ngbe_rom_info *eeprom = &hw->rom;
3213 	uint16_t *data = in_eeprom->data;
3214 	int first, length;
3215 
3216 	first = in_eeprom->offset >> 1;
3217 	length = in_eeprom->length >> 1;
3218 	if (first > hw->rom.word_size ||
3219 	    ((first + length) > hw->rom.word_size))
3220 		return -EINVAL;
3221 
3222 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3223 
3224 	return eeprom->writew_buffer(hw, first, length, data);
3225 }
3226 
3227 static const struct eth_dev_ops ngbe_eth_dev_ops = {
3228 	.dev_configure              = ngbe_dev_configure,
3229 	.dev_infos_get              = ngbe_dev_info_get,
3230 	.dev_start                  = ngbe_dev_start,
3231 	.dev_stop                   = ngbe_dev_stop,
3232 	.dev_set_link_up            = ngbe_dev_set_link_up,
3233 	.dev_set_link_down          = ngbe_dev_set_link_down,
3234 	.dev_close                  = ngbe_dev_close,
3235 	.dev_reset                  = ngbe_dev_reset,
3236 	.promiscuous_enable         = ngbe_dev_promiscuous_enable,
3237 	.promiscuous_disable        = ngbe_dev_promiscuous_disable,
3238 	.allmulticast_enable        = ngbe_dev_allmulticast_enable,
3239 	.allmulticast_disable       = ngbe_dev_allmulticast_disable,
3240 	.link_update                = ngbe_dev_link_update,
3241 	.stats_get                  = ngbe_dev_stats_get,
3242 	.xstats_get                 = ngbe_dev_xstats_get,
3243 	.xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
3244 	.stats_reset                = ngbe_dev_stats_reset,
3245 	.xstats_reset               = ngbe_dev_xstats_reset,
3246 	.xstats_get_names           = ngbe_dev_xstats_get_names,
3247 	.xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
3248 	.fw_version_get             = ngbe_fw_version_get,
3249 	.dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
3250 	.mtu_set                    = ngbe_dev_mtu_set,
3251 	.vlan_filter_set            = ngbe_vlan_filter_set,
3252 	.vlan_tpid_set              = ngbe_vlan_tpid_set,
3253 	.vlan_offload_set           = ngbe_vlan_offload_set,
3254 	.vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
3255 	.rx_queue_start	            = ngbe_dev_rx_queue_start,
3256 	.rx_queue_stop              = ngbe_dev_rx_queue_stop,
3257 	.tx_queue_start	            = ngbe_dev_tx_queue_start,
3258 	.tx_queue_stop              = ngbe_dev_tx_queue_stop,
3259 	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
3260 	.rx_queue_release           = ngbe_dev_rx_queue_release,
3261 	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
3262 	.tx_queue_release           = ngbe_dev_tx_queue_release,
3263 	.rx_queue_intr_enable       = ngbe_dev_rx_queue_intr_enable,
3264 	.rx_queue_intr_disable      = ngbe_dev_rx_queue_intr_disable,
3265 	.dev_led_on                 = ngbe_dev_led_on,
3266 	.dev_led_off                = ngbe_dev_led_off,
3267 	.flow_ctrl_get              = ngbe_flow_ctrl_get,
3268 	.flow_ctrl_set              = ngbe_flow_ctrl_set,
3269 	.mac_addr_add               = ngbe_add_rar,
3270 	.mac_addr_remove            = ngbe_remove_rar,
3271 	.mac_addr_set               = ngbe_set_default_mac_addr,
3272 	.uc_hash_table_set          = ngbe_uc_hash_table_set,
3273 	.uc_all_hash_table_set      = ngbe_uc_all_hash_table_set,
3274 	.reta_update                = ngbe_dev_rss_reta_update,
3275 	.reta_query                 = ngbe_dev_rss_reta_query,
3276 	.rss_hash_update            = ngbe_dev_rss_hash_update,
3277 	.rss_hash_conf_get          = ngbe_dev_rss_hash_conf_get,
3278 	.set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
3279 	.rxq_info_get               = ngbe_rxq_info_get,
3280 	.txq_info_get               = ngbe_txq_info_get,
3281 	.rx_burst_mode_get          = ngbe_rx_burst_mode_get,
3282 	.tx_burst_mode_get          = ngbe_tx_burst_mode_get,
3283 	.timesync_enable            = ngbe_timesync_enable,
3284 	.timesync_disable           = ngbe_timesync_disable,
3285 	.timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp,
3286 	.timesync_read_tx_timestamp = ngbe_timesync_read_tx_timestamp,
3287 	.get_reg                    = ngbe_get_regs,
3288 	.get_eeprom_length          = ngbe_get_eeprom_length,
3289 	.get_eeprom                 = ngbe_get_eeprom,
3290 	.set_eeprom                 = ngbe_set_eeprom,
3291 	.timesync_adjust_time       = ngbe_timesync_adjust_time,
3292 	.timesync_read_time         = ngbe_timesync_read_time,
3293 	.timesync_write_time        = ngbe_timesync_write_time,
3294 	.tx_done_cleanup            = ngbe_dev_tx_done_cleanup,
3295 };
3296 
3297 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
3298 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
3299 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
3300 
3301 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
3302 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
3303 
3304 #ifdef RTE_ETHDEV_DEBUG_RX
3305 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
3306 #endif
3307 #ifdef RTE_ETHDEV_DEBUG_TX
3308 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
3309 #endif
3310