xref: /dpdk/drivers/net/ngbe/ngbe_ethdev.c (revision 7917b0d38e92e8b9ec5a870415b791420e10f11a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3  * Copyright(c) 2010-2017 Intel Corporation
4  */
5 
6 #include <errno.h>
7 #include <rte_common.h>
8 #include <ethdev_pci.h>
9 
10 #include <rte_alarm.h>
11 
12 #include "ngbe_logs.h"
13 #include "ngbe.h"
14 #include "ngbe_ethdev.h"
15 #include "ngbe_rxtx.h"
16 #include "ngbe_regs_group.h"
17 
18 static const struct reg_info ngbe_regs_general[] = {
19 	{NGBE_RST, 1, 1, "NGBE_RST"},
20 	{NGBE_STAT, 1, 1, "NGBE_STAT"},
21 	{NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
22 	{NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
23 	{NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
24 	{NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
25 	{0, 0, 0, ""}
26 };
27 
28 static const struct reg_info ngbe_regs_nvm[] = {
29 	{0, 0, 0, ""}
30 };
31 
32 static const struct reg_info ngbe_regs_interrupt[] = {
33 	{0, 0, 0, ""}
34 };
35 
36 static const struct reg_info ngbe_regs_fctl_others[] = {
37 	{0, 0, 0, ""}
38 };
39 
40 static const struct reg_info ngbe_regs_rxdma[] = {
41 	{0, 0, 0, ""}
42 };
43 
44 static const struct reg_info ngbe_regs_rx[] = {
45 	{0, 0, 0, ""}
46 };
47 
48 static const struct reg_info ngbe_regs_tx[] = {
49 	{0, 0, 0, ""}
50 };
51 
52 static const struct reg_info ngbe_regs_wakeup[] = {
53 	{0, 0, 0, ""}
54 };
55 
56 static const struct reg_info ngbe_regs_mac[] = {
57 	{0, 0, 0, ""}
58 };
59 
60 static const struct reg_info ngbe_regs_diagnostic[] = {
61 	{0, 0, 0, ""},
62 };
63 
64 /* PF registers */
65 static const struct reg_info *ngbe_regs_others[] = {
66 				ngbe_regs_general,
67 				ngbe_regs_nvm,
68 				ngbe_regs_interrupt,
69 				ngbe_regs_fctl_others,
70 				ngbe_regs_rxdma,
71 				ngbe_regs_rx,
72 				ngbe_regs_tx,
73 				ngbe_regs_wakeup,
74 				ngbe_regs_mac,
75 				ngbe_regs_diagnostic,
76 				NULL};
77 
78 static int ngbe_dev_close(struct rte_eth_dev *dev);
79 static int ngbe_dev_link_update(struct rte_eth_dev *dev,
80 				int wait_to_complete);
81 static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
82 static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
83 static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
84 					uint16_t queue);
85 
86 static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
87 static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
88 static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
89 static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
90 static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
91 static void ngbe_dev_interrupt_handler(void *param);
92 static void ngbe_configure_msix(struct rte_eth_dev *dev);
93 static void ngbe_pbthresh_set(struct rte_eth_dev *dev);
94 
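/*
 * A minimal note on the macros below: the per-port HW strip bitmap keeps one
 * bit per Rx queue. The word index is q / (bits per bitmap word) and the bit
 * index is q % (bits per bitmap word), with NBBY being the number of bits in
 * a byte.
 */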
95 #define NGBE_SET_HWSTRIP(h, q) do {\
96 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
97 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
98 		(h)->bitmap[idx] |= 1 << bit;\
99 	} while (0)
100 
101 #define NGBE_CLEAR_HWSTRIP(h, q) do {\
102 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
103 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
104 		(h)->bitmap[idx] &= ~(1 << bit);\
105 	} while (0)
106 
107 #define NGBE_GET_HWSTRIP(h, q, r) do {\
108 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
109 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
110 		(r) = (h)->bitmap[idx] >> bit & 1;\
111 	} while (0)
112 
113 /*
114  * The set of PCI devices this driver supports
115  */
116 static const struct rte_pci_id pci_id_ngbe_map[] = {
117 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
118 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
119 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
120 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
121 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
122 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
123 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
124 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
125 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
126 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
127 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
128 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
129 	{ .vendor_id = 0, /* sentinel */ },
130 };
131 
132 static const struct rte_eth_desc_lim rx_desc_lim = {
133 	.nb_max = NGBE_RING_DESC_MAX,
134 	.nb_min = NGBE_RING_DESC_MIN,
135 	.nb_align = NGBE_RXD_ALIGN,
136 };
137 
138 static const struct rte_eth_desc_lim tx_desc_lim = {
139 	.nb_max = NGBE_RING_DESC_MAX,
140 	.nb_min = NGBE_RING_DESC_MIN,
141 	.nb_align = NGBE_TXD_ALIGN,
142 	.nb_seg_max = NGBE_TX_MAX_SEG,
143 	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
144 };
145 
146 static const struct eth_dev_ops ngbe_eth_dev_ops;
147 
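/*
 * Extended statistics are described by name/offset pairs: the offset is the
 * byte position of the counter inside struct ngbe_hw_stats, which lets the
 * xstats handlers fetch any value with plain pointer arithmetic.
 */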
148 #define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
149 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
150 static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
151 	/* MNG RxTx */
152 	HW_XSTAT(mng_bmc2host_packets),
153 	HW_XSTAT(mng_host2bmc_packets),
154 	/* Basic RxTx */
155 	HW_XSTAT(rx_packets),
156 	HW_XSTAT(tx_packets),
157 	HW_XSTAT(rx_bytes),
158 	HW_XSTAT(tx_bytes),
159 	HW_XSTAT(rx_total_bytes),
160 	HW_XSTAT(rx_total_packets),
161 	HW_XSTAT(tx_total_packets),
162 	HW_XSTAT(rx_total_missed_packets),
163 	HW_XSTAT(rx_broadcast_packets),
164 	HW_XSTAT(tx_broadcast_packets),
165 	HW_XSTAT(rx_multicast_packets),
166 	HW_XSTAT(tx_multicast_packets),
167 	HW_XSTAT(rx_management_packets),
168 	HW_XSTAT(tx_management_packets),
169 	HW_XSTAT(rx_management_dropped),
170 	HW_XSTAT(rx_dma_drop),
171 	HW_XSTAT(tx_dma_drop),
172 	HW_XSTAT(tx_secdrp_packets),
173 
174 	/* Basic Error */
175 	HW_XSTAT(rx_crc_errors),
176 	HW_XSTAT(rx_illegal_byte_errors),
177 	HW_XSTAT(rx_error_bytes),
178 	HW_XSTAT(rx_mac_short_packet_dropped),
179 	HW_XSTAT(rx_length_errors),
180 	HW_XSTAT(rx_undersize_errors),
181 	HW_XSTAT(rx_fragment_errors),
182 	HW_XSTAT(rx_oversize_cnt),
183 	HW_XSTAT(rx_jabber_errors),
184 	HW_XSTAT(rx_l3_l4_xsum_error),
185 	HW_XSTAT(mac_local_errors),
186 	HW_XSTAT(mac_remote_errors),
187 
188 	/* PB Stats */
189 	HW_XSTAT(rx_up_dropped),
190 	HW_XSTAT(rdb_pkt_cnt),
191 	HW_XSTAT(rdb_repli_cnt),
192 	HW_XSTAT(rdb_drp_cnt),
193 
194 	/* MACSEC */
195 	HW_XSTAT(tx_macsec_pkts_untagged),
196 	HW_XSTAT(tx_macsec_pkts_encrypted),
197 	HW_XSTAT(tx_macsec_pkts_protected),
198 	HW_XSTAT(tx_macsec_octets_encrypted),
199 	HW_XSTAT(tx_macsec_octets_protected),
200 	HW_XSTAT(rx_macsec_pkts_untagged),
201 	HW_XSTAT(rx_macsec_pkts_badtag),
202 	HW_XSTAT(rx_macsec_pkts_nosci),
203 	HW_XSTAT(rx_macsec_pkts_unknownsci),
204 	HW_XSTAT(rx_macsec_octets_decrypted),
205 	HW_XSTAT(rx_macsec_octets_validated),
206 	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
207 	HW_XSTAT(rx_macsec_sc_pkts_delayed),
208 	HW_XSTAT(rx_macsec_sc_pkts_late),
209 	HW_XSTAT(rx_macsec_sa_pkts_ok),
210 	HW_XSTAT(rx_macsec_sa_pkts_invalid),
211 	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
212 	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
213 	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
214 
215 	/* MAC RxTx */
216 	HW_XSTAT(rx_size_64_packets),
217 	HW_XSTAT(rx_size_65_to_127_packets),
218 	HW_XSTAT(rx_size_128_to_255_packets),
219 	HW_XSTAT(rx_size_256_to_511_packets),
220 	HW_XSTAT(rx_size_512_to_1023_packets),
221 	HW_XSTAT(rx_size_1024_to_max_packets),
222 	HW_XSTAT(tx_size_64_packets),
223 	HW_XSTAT(tx_size_65_to_127_packets),
224 	HW_XSTAT(tx_size_128_to_255_packets),
225 	HW_XSTAT(tx_size_256_to_511_packets),
226 	HW_XSTAT(tx_size_512_to_1023_packets),
227 	HW_XSTAT(tx_size_1024_to_max_packets),
228 
229 	/* Flow Control */
230 	HW_XSTAT(tx_xon_packets),
231 	HW_XSTAT(rx_xon_packets),
232 	HW_XSTAT(tx_xoff_packets),
233 	HW_XSTAT(rx_xoff_packets),
234 
235 	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
236 	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
237 	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
238 	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
239 };
240 
241 #define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
242 			   sizeof(rte_ngbe_stats_strings[0]))
243 
244 /* Per-queue statistics */
245 #define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
246 static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
247 	QP_XSTAT(rx_qp_packets),
248 	QP_XSTAT(tx_qp_packets),
249 	QP_XSTAT(rx_qp_bytes),
250 	QP_XSTAT(tx_qp_bytes),
251 	QP_XSTAT(rx_qp_mc_packets),
252 };
253 
254 #define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
255 			   sizeof(rte_ngbe_qp_strings[0]))
256 
257 static inline int32_t
258 ngbe_pf_reset_hw(struct ngbe_hw *hw)
259 {
260 	uint32_t ctrl_ext;
261 	int32_t status;
262 
263 	status = hw->mac.reset_hw(hw);
264 
265 	ctrl_ext = rd32(hw, NGBE_PORTCTL);
266 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
267 	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
268 	wr32(hw, NGBE_PORTCTL, ctrl_ext);
269 	ngbe_flush(hw);
270 
271 	if (status == NGBE_ERR_SFP_NOT_PRESENT)
272 		status = 0;
273 	return status;
274 }
275 
276 static inline void
277 ngbe_enable_intr(struct rte_eth_dev *dev)
278 {
279 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
280 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
281 
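	/* enable the saved misc interrupt causes, then clear (unmask) the
	 * first 32 queue interrupt vectors
	 */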
282 	wr32(hw, NGBE_IENMISC, intr->mask_misc);
283 	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
284 	ngbe_flush(hw);
285 }
286 
287 static void
288 ngbe_disable_intr(struct ngbe_hw *hw)
289 {
290 	PMD_INIT_FUNC_TRACE();
291 
292 	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
293 	ngbe_flush(hw);
294 }
295 
296 /*
297  * Ensure that all locks are released before first NVM or PHY access
298  */
299 static void
300 ngbe_swfw_lock_reset(struct ngbe_hw *hw)
301 {
302 	uint16_t mask;
303 
304 	/*
305 	 * These locks are trickier since they are common to all ports; but
306 	 * swfw_sync retries for long enough (1s) to be almost sure that, if
307 	 * the lock cannot be taken, it is due to an improper release of the
308 	 * semaphore.
309 	 */
310 	mask = NGBE_MNGSEM_SWPHY |
311 	       NGBE_MNGSEM_SWMBX |
312 	       NGBE_MNGSEM_SWFLASH;
313 	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
314 		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
315 
316 	hw->mac.release_swfw_sync(hw, mask);
317 }
318 
319 static int
320 eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
321 {
322 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
323 	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
324 	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
325 	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
326 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
327 	const struct rte_memzone *mz;
328 	uint32_t ctrl_ext;
329 	u32 led_conf = 0;
330 	int err, ret;
331 
332 	PMD_INIT_FUNC_TRACE();
333 
334 	eth_dev->dev_ops = &ngbe_eth_dev_ops;
335 	eth_dev->rx_queue_count       = ngbe_dev_rx_queue_count;
336 	eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
337 	eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
338 	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
339 	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
340 	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;
341 
342 	/*
343 	 * For secondary processes, we don't initialize any further, as the
344 	 * primary process has already done this work. Only check whether we
345 	 * need a different Rx and Tx function.
346 	 */
347 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
348 		struct ngbe_tx_queue *txq;
349 		/* The Tx queue function in the primary process is set by the
350 		 * last initialized queue; Tx queues may not have been
351 		 * initialized by the primary process yet */
352 		if (eth_dev->data->tx_queues) {
353 			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
354 			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
355 			ngbe_set_tx_function(eth_dev, txq);
356 		} else {
357 			/* Use default Tx function if we get here */
358 			PMD_INIT_LOG(NOTICE,
359 				"No Tx queues configured yet. Using default Tx function.");
360 		}
361 
362 		ngbe_set_rx_function(eth_dev);
363 
364 		return 0;
365 	}
366 
367 	rte_eth_copy_pci_info(eth_dev, pci_dev);
368 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
369 
370 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
371 
372 	/* Vendor and Device ID need to be set before init of shared code */
373 	hw->back = pci_dev;
374 	hw->port_id = eth_dev->data->port_id;
375 	hw->device_id = pci_dev->id.device_id;
376 	hw->vendor_id = pci_dev->id.vendor_id;
377 	if (pci_dev->id.subsystem_vendor_id == PCI_VENDOR_ID_WANGXUN) {
378 		hw->sub_system_id = pci_dev->id.subsystem_device_id;
379 	} else {
380 		u32 ssid = 0;
381 
382 		err = ngbe_flash_read_dword(hw, 0xFFFDC, &ssid);
383 		if (err) {
384 			PMD_INIT_LOG(ERR,
385 				"Read of internal subsystem device id failed");
386 			return -ENODEV;
387 		}
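		/* the ID is read from flash byte-swapped: exchange the two
		 * low-order bytes of the dword just read
		 */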
388 		hw->sub_system_id = (u16)ssid >> 8 | (u16)ssid << 8;
389 	}
390 	ngbe_map_device_id(hw);
391 
392 	/* Reserve memory for interrupt status block */
393 	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
394 		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
395 	if (mz == NULL)
396 		return -ENOMEM;
397 
398 	hw->isb_dma = TMZ_PADDR(mz);
399 	hw->isb_mem = TMZ_VADDR(mz);
400 
401 	/* Initialize the shared code (base driver) */
402 	err = ngbe_init_shared_code(hw);
403 	if (err != 0) {
404 		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
405 		return -EIO;
406 	}
407 
408 	/* Unlock any pending hardware semaphore */
409 	ngbe_swfw_lock_reset(hw);
410 	ngbe_set_ncsi_status(hw);
411 
412 	/* Get Hardware Flow Control setting */
413 	hw->fc.requested_mode = ngbe_fc_full;
414 	hw->fc.current_mode = ngbe_fc_full;
415 	hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
416 	hw->fc.low_water = NGBE_FC_XON_LOTH;
417 	hw->fc.high_water = NGBE_FC_XOFF_HITH;
418 	hw->fc.send_xon = 1;
419 
420 	err = hw->rom.init_params(hw);
421 	if (err != 0) {
422 		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
423 		return -EIO;
424 	}
425 
426 	/* Make sure we have a good EEPROM before we read from it */
427 	err = hw->rom.validate_checksum(hw, NULL);
428 	if (err != 0) {
429 		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
430 		return -EIO;
431 	}
432 
433 	err = hw->phy.led_oem_chk(hw, &led_conf);
434 	if (err == 0)
435 		hw->led_conf = led_conf;
436 	else
437 		hw->led_conf = 0xFFFF;
438 
439 	err = hw->mac.init_hw(hw);
440 	if (err != 0) {
441 		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
442 		return -EIO;
443 	}
444 
445 	/* Reset the hw statistics */
446 	ngbe_dev_stats_reset(eth_dev);
447 
448 	/* disable interrupt */
449 	ngbe_disable_intr(hw);
450 
451 	/* Allocate memory for storing MAC addresses */
452 	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
453 					       hw->mac.num_rar_entries, 0);
454 	if (eth_dev->data->mac_addrs == NULL) {
455 		PMD_INIT_LOG(ERR,
456 			     "Failed to allocate %u bytes needed to store MAC addresses",
457 			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
458 		return -ENOMEM;
459 	}
460 
461 	/* Copy the permanent MAC address */
462 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
463 			&eth_dev->data->mac_addrs[0]);
464 
465 	/* Allocate memory for storing hash filter MAC addresses */
466 	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
467 			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
468 	if (eth_dev->data->hash_mac_addrs == NULL) {
469 		PMD_INIT_LOG(ERR,
470 			     "Failed to allocate %d bytes needed to store MAC addresses",
471 			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
472 		rte_free(eth_dev->data->mac_addrs);
473 		eth_dev->data->mac_addrs = NULL;
474 		return -ENOMEM;
475 	}
476 
477 	/* initialize the vfta */
478 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
479 
480 	/* initialize the hw strip bitmap */
481 	memset(hwstrip, 0, sizeof(*hwstrip));
482 
483 	/* initialize PF if max_vfs is not zero */
484 	ret = ngbe_pf_host_init(eth_dev);
485 	if (ret) {
486 		rte_free(eth_dev->data->mac_addrs);
487 		eth_dev->data->mac_addrs = NULL;
488 		rte_free(eth_dev->data->hash_mac_addrs);
489 		eth_dev->data->hash_mac_addrs = NULL;
490 		return ret;
491 	}
492 
493 	ctrl_ext = rd32(hw, NGBE_PORTCTL);
494 	/* let hardware know driver is loaded */
495 	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
496 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
497 	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
498 	wr32(hw, NGBE_PORTCTL, ctrl_ext);
499 	ngbe_flush(hw);
500 
501 	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
502 			(int)hw->mac.type, (int)hw->phy.type);
503 
504 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
505 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
506 		     pci_dev->id.device_id);
507 
508 	rte_intr_callback_register(intr_handle,
509 				   ngbe_dev_interrupt_handler, eth_dev);
510 
511 	/* enable uio/vfio intr/eventfd mapping */
512 	rte_intr_enable(intr_handle);
513 
514 	/* enable support intr */
515 	ngbe_enable_intr(eth_dev);
516 
517 	return 0;
518 }
519 
520 static int
521 eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
522 {
523 	PMD_INIT_FUNC_TRACE();
524 
525 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
526 		return 0;
527 
528 	ngbe_dev_close(eth_dev);
529 
530 	return 0;
531 }
532 
533 static int
534 eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
535 		struct rte_pci_device *pci_dev)
536 {
537 	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
538 			sizeof(struct ngbe_adapter),
539 			eth_dev_pci_specific_init, pci_dev,
540 			eth_ngbe_dev_init, NULL);
541 }
542 
543 static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
544 {
545 	struct rte_eth_dev *ethdev;
546 
547 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
548 	if (ethdev == NULL)
549 		return 0;
550 
551 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ngbe_dev_uninit);
552 }
553 
554 static struct rte_pci_driver rte_ngbe_pmd = {
555 	.id_table = pci_id_ngbe_map,
556 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
557 		     RTE_PCI_DRV_INTR_LSC,
558 	.probe = eth_ngbe_pci_probe,
559 	.remove = eth_ngbe_pci_remove,
560 };
561 
562 static int
563 ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
564 {
565 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
566 	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
567 	uint32_t vfta;
568 	uint32_t vid_idx;
569 	uint32_t vid_bit;
570 
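	/* the VFTA is an array of 32-bit registers: bits 5..11 of the VLAN ID
	 * select the register and the low 5 bits select the bit inside it
	 */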
571 	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
572 	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
573 	vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
574 	if (on)
575 		vfta |= vid_bit;
576 	else
577 		vfta &= ~vid_bit;
578 	wr32(hw, NGBE_VLANTBL(vid_idx), vfta);
579 
580 	/* update local VFTA copy */
581 	shadow_vfta->vfta[vid_idx] = vfta;
582 
583 	return 0;
584 }
585 
586 static void
587 ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
588 {
589 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
590 	struct ngbe_rx_queue *rxq;
591 	bool restart;
592 	uint32_t rxcfg, rxbal, rxbah;
593 
594 	if (on)
595 		ngbe_vlan_hw_strip_enable(dev, queue);
596 	else
597 		ngbe_vlan_hw_strip_disable(dev, queue);
598 
599 	rxq = dev->data->rx_queues[queue];
600 	rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
601 	rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
602 	rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
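	/* the VLAN strip bit in RXCFG only takes effect while the ring is
	 * disabled, so restart the ring below if the strip state changes
	 * while it is enabled
	 */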
603 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
604 		restart = (rxcfg & NGBE_RXCFG_ENA) &&
605 			!(rxcfg & NGBE_RXCFG_VLAN);
606 		rxcfg |= NGBE_RXCFG_VLAN;
607 	} else {
608 		restart = (rxcfg & NGBE_RXCFG_ENA) &&
609 			(rxcfg & NGBE_RXCFG_VLAN);
610 		rxcfg &= ~NGBE_RXCFG_VLAN;
611 	}
612 	rxcfg &= ~NGBE_RXCFG_ENA;
613 
614 	if (restart) {
615 		/* set vlan strip for ring */
616 		ngbe_dev_rx_queue_stop(dev, queue);
617 		wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
618 		wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
619 		wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
620 		ngbe_dev_rx_queue_start(dev, queue);
621 	}
622 }
623 
624 static int
625 ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
626 		    enum rte_vlan_type vlan_type,
627 		    uint16_t tpid)
628 {
629 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
630 	int ret = 0;
631 	uint32_t portctrl, vlan_ext, qinq;
632 
633 	portctrl = rd32(hw, NGBE_PORTCTL);
634 
635 	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
636 	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
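	/* which TPID register to program depends on whether VLAN extension,
	 * and QinQ on top of it, is currently enabled in PORTCTL
	 */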
637 	switch (vlan_type) {
638 	case RTE_ETH_VLAN_TYPE_INNER:
639 		if (vlan_ext) {
640 			wr32m(hw, NGBE_VLANCTL,
641 				NGBE_VLANCTL_TPID_MASK,
642 				NGBE_VLANCTL_TPID(tpid));
643 			wr32m(hw, NGBE_DMATXCTRL,
644 				NGBE_DMATXCTRL_TPID_MASK,
645 				NGBE_DMATXCTRL_TPID(tpid));
646 		} else {
647 			ret = -ENOTSUP;
648 			PMD_DRV_LOG(ERR,
649 				"Inner type is not supported by single VLAN");
650 		}
651 
652 		if (qinq) {
653 			wr32m(hw, NGBE_TAGTPID(0),
654 				NGBE_TAGTPID_LSB_MASK,
655 				NGBE_TAGTPID_LSB(tpid));
656 		}
657 		break;
658 	case RTE_ETH_VLAN_TYPE_OUTER:
659 		if (vlan_ext) {
660 			/* Only the high 16 bits are valid */
661 			wr32m(hw, NGBE_EXTAG,
662 				NGBE_EXTAG_VLAN_MASK,
663 				NGBE_EXTAG_VLAN(tpid));
664 		} else {
665 			wr32m(hw, NGBE_VLANCTL,
666 				NGBE_VLANCTL_TPID_MASK,
667 				NGBE_VLANCTL_TPID(tpid));
668 			wr32m(hw, NGBE_DMATXCTRL,
669 				NGBE_DMATXCTRL_TPID_MASK,
670 				NGBE_DMATXCTRL_TPID(tpid));
671 		}
672 
673 		if (qinq) {
674 			wr32m(hw, NGBE_TAGTPID(0),
675 				NGBE_TAGTPID_MSB_MASK,
676 				NGBE_TAGTPID_MSB(tpid));
677 		}
678 		break;
679 	default:
680 		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
681 		return -EINVAL;
682 	}
683 
684 	return ret;
685 }
686 
687 void
688 ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
689 {
690 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
691 	uint32_t vlnctrl;
692 
693 	PMD_INIT_FUNC_TRACE();
694 
695 	/* Filter Table Disable */
696 	vlnctrl = rd32(hw, NGBE_VLANCTL);
697 	vlnctrl &= ~NGBE_VLANCTL_VFE;
698 	wr32(hw, NGBE_VLANCTL, vlnctrl);
699 }
700 
701 void
702 ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
703 {
704 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
705 	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
706 	uint32_t vlnctrl;
707 	uint16_t i;
708 
709 	PMD_INIT_FUNC_TRACE();
710 
711 	/* Filter Table Enable */
712 	vlnctrl = rd32(hw, NGBE_VLANCTL);
713 	vlnctrl &= ~NGBE_VLANCTL_CFIENA;
714 	vlnctrl |= NGBE_VLANCTL_VFE;
715 	wr32(hw, NGBE_VLANCTL, vlnctrl);
716 
717 	/* write whatever is in local vfta copy */
718 	for (i = 0; i < NGBE_VFTA_SIZE; i++)
719 		wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
720 }
721 
722 void
723 ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
724 {
725 	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
726 	struct ngbe_rx_queue *rxq;
727 
728 	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
729 		return;
730 
731 	if (on)
732 		NGBE_SET_HWSTRIP(hwstrip, queue);
733 	else
734 		NGBE_CLEAR_HWSTRIP(hwstrip, queue);
735 
736 	if (queue >= dev->data->nb_rx_queues)
737 		return;
738 
739 	rxq = dev->data->rx_queues[queue];
740 
741 	if (on) {
742 		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
743 		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
744 	} else {
745 		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
746 		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
747 	}
748 }
749 
750 static void
751 ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
752 {
753 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
754 	uint32_t ctrl;
755 
756 	PMD_INIT_FUNC_TRACE();
757 
758 	ctrl = rd32(hw, NGBE_RXCFG(queue));
759 	ctrl &= ~NGBE_RXCFG_VLAN;
760 	wr32(hw, NGBE_RXCFG(queue), ctrl);
761 
762 	/* record this setting in the per-queue HW strip bitmap */
763 	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
764 }
765 
766 static void
767 ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
768 {
769 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
770 	uint32_t ctrl;
771 
772 	PMD_INIT_FUNC_TRACE();
773 
774 	ctrl = rd32(hw, NGBE_RXCFG(queue));
775 	ctrl |= NGBE_RXCFG_VLAN;
776 	wr32(hw, NGBE_RXCFG(queue), ctrl);
777 
778 	/* record this setting in the per-queue HW strip bitmap */
779 	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
780 }
781 
782 static void
783 ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
784 {
785 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
786 	uint32_t ctrl;
787 
788 	PMD_INIT_FUNC_TRACE();
789 
790 	ctrl = rd32(hw, NGBE_PORTCTL);
791 	ctrl &= ~NGBE_PORTCTL_VLANEXT;
792 	ctrl &= ~NGBE_PORTCTL_QINQ;
793 	wr32(hw, NGBE_PORTCTL, ctrl);
794 }
795 
796 static void
797 ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
798 {
799 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
800 	uint32_t ctrl;
801 
802 	PMD_INIT_FUNC_TRACE();
803 
804 	ctrl  = rd32(hw, NGBE_PORTCTL);
805 	ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
806 	wr32(hw, NGBE_PORTCTL, ctrl);
807 }
808 
809 static void
810 ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
811 {
812 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
813 	uint32_t ctrl;
814 
815 	PMD_INIT_FUNC_TRACE();
816 
817 	ctrl = rd32(hw, NGBE_PORTCTL);
818 	ctrl &= ~NGBE_PORTCTL_QINQ;
819 	wr32(hw, NGBE_PORTCTL, ctrl);
820 }
821 
822 static void
823 ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
824 {
825 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
826 	uint32_t ctrl;
827 
828 	PMD_INIT_FUNC_TRACE();
829 
830 	ctrl  = rd32(hw, NGBE_PORTCTL);
831 	ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
832 	wr32(hw, NGBE_PORTCTL, ctrl);
833 }
834 
835 void
836 ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
837 {
838 	struct ngbe_rx_queue *rxq;
839 	uint16_t i;
840 
841 	PMD_INIT_FUNC_TRACE();
842 
843 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
844 		rxq = dev->data->rx_queues[i];
845 
846 		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
847 			ngbe_vlan_hw_strip_enable(dev, i);
848 		else
849 			ngbe_vlan_hw_strip_disable(dev, i);
850 	}
851 }
852 
853 void
854 ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
855 {
856 	uint16_t i;
857 	struct rte_eth_rxmode *rxmode;
858 	struct ngbe_rx_queue *rxq;
859 
860 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
861 		rxmode = &dev->data->dev_conf.rxmode;
862 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
863 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
864 				rxq = dev->data->rx_queues[i];
865 				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
866 			}
867 		else
868 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
869 				rxq = dev->data->rx_queues[i];
870 				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
871 			}
872 	}
873 }
874 
875 static int
876 ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
877 {
878 	struct rte_eth_rxmode *rxmode;
879 	rxmode = &dev->data->dev_conf.rxmode;
880 
881 	if (mask & RTE_ETH_VLAN_STRIP_MASK)
882 		ngbe_vlan_hw_strip_config(dev);
883 
884 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
885 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
886 			ngbe_vlan_hw_filter_enable(dev);
887 		else
888 			ngbe_vlan_hw_filter_disable(dev);
889 	}
890 
891 	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
892 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
893 			ngbe_vlan_hw_extend_enable(dev);
894 		else
895 			ngbe_vlan_hw_extend_disable(dev);
896 	}
897 
898 	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
899 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
900 			ngbe_qinq_hw_strip_enable(dev);
901 		else
902 			ngbe_qinq_hw_strip_disable(dev);
903 	}
904 
905 	return 0;
906 }
907 
908 static int
909 ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
910 {
911 	ngbe_config_vlan_strip_on_all_queues(dev, mask);
912 
913 	ngbe_vlan_offload_config(dev, mask);
914 
915 	return 0;
916 }
917 
918 static int
919 ngbe_dev_configure(struct rte_eth_dev *dev)
920 {
921 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
922 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
923 
924 	PMD_INIT_FUNC_TRACE();
925 
926 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
927 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
928 
929 	/* set flag to update link status after init */
930 	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
931 
932 	/*
933 	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
934 	 * allocation preconditions, it will be reset to FALSE.
935 	 */
936 	adapter->rx_bulk_alloc_allowed = true;
937 	adapter->rx_vec_allowed = true;
938 
939 	return 0;
940 }
941 
942 static void
943 ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
944 {
945 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
946 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
947 
948 	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
949 	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
950 	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
951 	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
952 		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
953 	else
954 		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));
955 
956 	intr->mask_misc |= NGBE_ICRMISC_GPIO | NGBE_ICRMISC_HEAT;
957 }
958 
959 /*
960  * Configure device link speed and setup link.
961  * It returns 0 on success.
962  */
963 static int
964 ngbe_dev_start(struct rte_eth_dev *dev)
965 {
966 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
967 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
968 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
969 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
970 	uint32_t intr_vector = 0;
971 	int err;
972 	bool link_up = false, negotiate = false;
973 	uint32_t speed = 0;
974 	uint32_t allowed_speeds = 0;
975 	int mask = 0;
976 	int status;
977 	uint32_t *link_speeds;
978 
979 	PMD_INIT_FUNC_TRACE();
980 
981 	/* disable uio/vfio intr/eventfd mapping */
982 	rte_intr_disable(intr_handle);
983 
984 	/* stop adapter */
985 	hw->adapter_stopped = 0;
986 
987 	/* reinitialize adapter, this calls reset and start */
988 	hw->nb_rx_queues = dev->data->nb_rx_queues;
989 	hw->nb_tx_queues = dev->data->nb_tx_queues;
990 	status = ngbe_pf_reset_hw(hw);
991 	if (status != 0)
992 		return -1;
993 	hw->mac.start_hw(hw);
994 	hw->mac.get_link_status = true;
995 
996 	ngbe_set_pcie_master(hw, true);
997 
998 	/* configure PF module if SRIOV enabled */
999 	ngbe_pf_host_configure(dev);
1000 
1001 	ngbe_dev_phy_intr_setup(dev);
1002 
1003 	/* check and configure queue intr-vector mapping */
1004 	if ((rte_intr_cap_multiple(intr_handle) ||
1005 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
1006 	    dev->data->dev_conf.intr_conf.rxq != 0) {
1007 		intr_vector = dev->data->nb_rx_queues;
1008 		if (rte_intr_efd_enable(intr_handle, intr_vector))
1009 			return -1;
1010 	}
1011 
1012 	if (rte_intr_dp_is_en(intr_handle)) {
1013 		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
1014 						   dev->data->nb_rx_queues)) {
1015 			PMD_INIT_LOG(ERR,
1016 				     "Failed to allocate %d rx_queues intr_vec",
1017 				     dev->data->nb_rx_queues);
1018 			return -ENOMEM;
1019 		}
1020 	}
1021 
1022 	/* configure MSI-X for sleep until Rx interrupt */
1023 	ngbe_configure_msix(dev);
1024 
1025 	/* initialize transmission unit */
1026 	ngbe_dev_tx_init(dev);
1027 
1028 	/* This can fail when allocating mbufs for descriptor rings */
1029 	err = ngbe_dev_rx_init(dev);
1030 	if (err != 0) {
1031 		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
1032 		goto error;
1033 	}
1034 
1035 	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
1036 		RTE_ETH_VLAN_EXTEND_MASK;
1037 	err = ngbe_vlan_offload_config(dev, mask);
1038 	if (err != 0) {
1039 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1040 		goto error;
1041 	}
1042 
1043 	hw->mac.setup_pba(hw);
1044 	ngbe_pbthresh_set(dev);
1045 	ngbe_configure_port(dev);
1046 
1047 	err = ngbe_dev_rxtx_start(dev);
1048 	if (err < 0) {
1049 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1050 		goto error;
1051 	}
1052 
1053 	/* Skip link setup if loopback mode is enabled. */
1054 	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
1055 		goto skip_link_setup;
1056 
1057 	hw->lsc = dev->data->dev_conf.intr_conf.lsc;
1058 
1059 	err = hw->mac.check_link(hw, &speed, &link_up, 0);
1060 	if (err != 0)
1061 		goto error;
1062 	dev->data->dev_link.link_status = link_up;
1063 
1064 	link_speeds = &dev->data->dev_conf.link_speeds;
1065 	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
1066 		negotiate = true;
1067 
1068 	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
1069 	if (err != 0)
1070 		goto error;
1071 
1072 	allowed_speeds = 0;
1073 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
1074 		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
1075 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
1076 		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
1077 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
1078 		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;
1079 
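	/* shift out bit 0 (RTE_ETH_LINK_SPEED_FIXED) so that only the
	 * requested speed bits are validated against the allowed set
	 */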
1080 	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
1081 		PMD_INIT_LOG(ERR, "Invalid link setting");
1082 		goto error;
1083 	}
1084 
1085 	speed = 0x0;
1086 	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
1087 		speed = hw->mac.default_speeds;
1088 	} else {
1089 		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
1090 			speed |= NGBE_LINK_SPEED_1GB_FULL;
1091 		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
1092 			speed |= NGBE_LINK_SPEED_100M_FULL;
1093 		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
1094 			speed |= NGBE_LINK_SPEED_10M_FULL;
1095 	}
1096 
1097 	if (!hw->ncsi_enabled) {
1098 		err = hw->phy.init_hw(hw);
1099 		if (err != 0) {
1100 			PMD_INIT_LOG(ERR, "PHY init failed");
1101 			goto error;
1102 		}
1103 	}
1104 	err = hw->mac.setup_link(hw, speed, link_up);
1105 	if (err != 0)
1106 		goto error;
1107 
1108 skip_link_setup:
1109 
1110 	if (rte_intr_allow_others(intr_handle)) {
1111 		ngbe_dev_misc_interrupt_setup(dev);
1112 		/* check if lsc interrupt is enabled */
1113 		if (dev->data->dev_conf.intr_conf.lsc != 0)
1114 			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
1115 		else
1116 			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
1117 		ngbe_dev_macsec_interrupt_setup(dev);
1118 		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
1119 	} else {
1120 		rte_intr_callback_unregister(intr_handle,
1121 					     ngbe_dev_interrupt_handler, dev);
1122 		if (dev->data->dev_conf.intr_conf.lsc != 0)
1123 			PMD_INIT_LOG(INFO,
1124 				     "LSC won't enable because of no intr multiplex");
1125 	}
1126 
1127 	/* check if rxq interrupt is enabled */
1128 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1129 	    rte_intr_dp_is_en(intr_handle))
1130 		ngbe_dev_rxq_interrupt_setup(dev);
1131 
1132 	/* enable UIO/VFIO intr/eventfd mapping */
1133 	rte_intr_enable(intr_handle);
1134 
1135 	/* resume enabled intr since HW reset */
1136 	ngbe_enable_intr(dev);
1137 
1138 	if (hw->gpio_ctl) {
1139 		/* gpio0 is used for power on/off control */
1140 		wr32(hw, NGBE_GPIODATA, 0);
1141 	}
1142 
1143 	/*
1144 	 * Update link status right before return, because it may
1145 	 * start the link configuration process in a separate thread.
1146 	 */
1147 	ngbe_dev_link_update(dev, 0);
1148 
1149 	ngbe_read_stats_registers(hw, hw_stats);
1150 	hw->offset_loaded = 1;
1151 
1152 	return 0;
1153 
1154 error:
1155 	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1156 	ngbe_dev_clear_queues(dev);
1157 	return -EIO;
1158 }
1159 
1160 /*
1161  * Stop device: disable rx and tx functions to allow for reconfiguring.
1162  */
1163 static int
1164 ngbe_dev_stop(struct rte_eth_dev *dev)
1165 {
1166 	struct rte_eth_link link;
1167 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
1168 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1169 	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
1170 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1171 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1172 	int vf;
1173 
1174 	if (hw->adapter_stopped)
1175 		goto out;
1176 
1177 	PMD_INIT_FUNC_TRACE();
1178 
1179 	if (hw->gpio_ctl) {
1180 		/* gpio0 is used for power on/off control */
1181 		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
1182 	}
1183 
1184 	/* disable interrupts */
1185 	ngbe_disable_intr(hw);
1186 
1187 	/* reset the NIC */
1188 	ngbe_pf_reset_hw(hw);
1189 	hw->adapter_stopped = 0;
1190 
1191 	/* stop adapter */
1192 	ngbe_stop_hw(hw);
1193 
1194 	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
1195 		vfinfo[vf].clear_to_send = false;
1196 
1197 	ngbe_dev_clear_queues(dev);
1198 
1199 	/* Clear stored conf */
1200 	dev->data->scattered_rx = 0;
1201 
1202 	/* Clear recorded link status */
1203 	memset(&link, 0, sizeof(link));
1204 	rte_eth_linkstatus_set(dev, &link);
1205 
1206 	if (!rte_intr_allow_others(intr_handle))
1207 		/* resume to the default handler */
1208 		rte_intr_callback_register(intr_handle,
1209 					   ngbe_dev_interrupt_handler,
1210 					   (void *)dev);
1211 
1212 	/* Clean datapath event and queue/vec mapping */
1213 	rte_intr_efd_disable(intr_handle);
1214 	rte_intr_vec_list_free(intr_handle);
1215 
1216 	ngbe_set_pcie_master(hw, true);
1217 
1218 	adapter->rss_reta_updated = 0;
1219 
1220 	hw->adapter_stopped = true;
1221 	dev->data->dev_started = 0;
1222 
1223 out:
1224 	/* close phy to prevent reset in dev_close from restarting physical link */
1225 	if (!(hw->wol_enabled || hw->ncsi_enabled))
1226 		hw->phy.set_phy_power(hw, false);
1227 
1228 	return 0;
1229 }
1230 
1231 /*
1232  * Set device link up: power on.
1233  */
1234 static int
1235 ngbe_dev_set_link_up(struct rte_eth_dev *dev)
1236 {
1237 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1238 
1239 	if (!(hw->ncsi_enabled || hw->wol_enabled))
1240 		hw->phy.set_phy_power(hw, true);
1241 
1242 	return 0;
1243 }
1244 
1245 /*
1246  * Set device link down: power off.
1247  */
1248 static int
1249 ngbe_dev_set_link_down(struct rte_eth_dev *dev)
1250 {
1251 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1252 
1253 	if (!(hw->ncsi_enabled || hw->wol_enabled))
1254 		hw->phy.set_phy_power(hw, false);
1255 
1256 	return 0;
1257 }
1258 
1259 /*
1260  * Reset and stop device.
1261  */
1262 static int
1263 ngbe_dev_close(struct rte_eth_dev *dev)
1264 {
1265 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1266 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1267 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1268 	int retries = 0;
1269 	int ret;
1270 
1271 	PMD_INIT_FUNC_TRACE();
1272 
1273 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1274 		return 0;
1275 
1276 	ngbe_pf_reset_hw(hw);
1277 
1278 	ngbe_dev_stop(dev);
1279 
1280 	ngbe_dev_free_queues(dev);
1281 
1282 	ngbe_set_pcie_master(hw, false);
1283 
1284 	/* reprogram the RAR[0] in case user changed it. */
1285 	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1286 
1287 	/* Unlock any pending hardware semaphore */
1288 	ngbe_swfw_lock_reset(hw);
1289 
1290 	/* disable uio intr before callback unregister */
1291 	rte_intr_disable(intr_handle);
1292 
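	/* the handler may still be running on another core, in which case
	 * unregister returns -EAGAIN; retry for a while before giving up
	 */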
1293 	do {
1294 		ret = rte_intr_callback_unregister(intr_handle,
1295 				ngbe_dev_interrupt_handler, dev);
1296 		if (ret >= 0 || ret == -ENOENT) {
1297 			break;
1298 		} else if (ret != -EAGAIN) {
1299 			PMD_INIT_LOG(ERR,
1300 				"intr callback unregister failed: %d",
1301 				ret);
1302 		}
1303 		rte_delay_ms(100);
1304 	} while (retries++ < (10 + NGBE_LINK_UP_TIME));
1305 
1306 	/* uninitialize PF if max_vfs is not zero */
1307 	ngbe_pf_host_uninit(dev);
1308 
1309 	rte_free(dev->data->mac_addrs);
1310 	dev->data->mac_addrs = NULL;
1311 
1312 	rte_free(dev->data->hash_mac_addrs);
1313 	dev->data->hash_mac_addrs = NULL;
1314 
1315 	return ret;
1316 }
1317 
1318 /*
1319  * Reset PF device.
1320  */
1321 static int
1322 ngbe_dev_reset(struct rte_eth_dev *dev)
1323 {
1324 	int ret;
1325 
1326 	/* When a DPDK PMD PF begins to reset the PF port, it should notify
1327 	 * all its VFs so that they stay aligned with it. The notification
1328 	 * mechanism is PMD-specific, and for the ngbe PF it is rather complex.
1329 	 * To avoid unexpected behavior in the VFs, resetting the PF while
1330 	 * SR-IOV is active is currently not supported. It might be supported later.
1331 	 */
1332 	if (dev->data->sriov.active)
1333 		return -ENOTSUP;
1334 
1335 	ret = eth_ngbe_dev_uninit(dev);
1336 	if (ret != 0)
1337 		return ret;
1338 
1339 	ret = eth_ngbe_dev_init(dev, NULL);
1340 
1341 	return ret;
1342 }
1343 
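/*
 * Per-queue hardware counters are only 32 bits wide (36 bits for the byte
 * counters, split across an LSB/MSB register pair), so they wrap. Each macro
 * widens the raw value on wrap-around and reports the delta against the
 * snapshot taken when the offsets were last loaded.
 */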
1344 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
1345 	{                                                       \
1346 		uint32_t current_counter = rd32(hw, reg);       \
1347 		if (current_counter < last_counter)             \
1348 			current_counter += 0x100000000LL;       \
1349 		if (!hw->offset_loaded)                         \
1350 			last_counter = current_counter;         \
1351 		counter = current_counter - last_counter;       \
1352 		counter &= 0xFFFFFFFFLL;                        \
1353 	}
1354 
1355 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1356 	{                                                                \
1357 		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
1358 		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
1359 		uint64_t current_counter = (current_counter_msb << 32) | \
1360 			current_counter_lsb;                             \
1361 		if (current_counter < last_counter)                      \
1362 			current_counter += 0x1000000000LL;               \
1363 		if (!hw->offset_loaded)                                  \
1364 			last_counter = current_counter;                  \
1365 		counter = current_counter - last_counter;                \
1366 		counter &= 0xFFFFFFFFFLL;                                \
1367 	}
1368 
1369 void
1370 ngbe_read_stats_registers(struct ngbe_hw *hw,
1371 			   struct ngbe_hw_stats *hw_stats)
1372 {
1373 	unsigned int i;
1374 
1375 	/* QP Stats */
1376 	for (i = 0; i < hw->nb_rx_queues; i++) {
1377 		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
1378 			hw->qp_last[i].rx_qp_packets,
1379 			hw_stats->qp[i].rx_qp_packets);
1380 		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
1381 			hw->qp_last[i].rx_qp_bytes,
1382 			hw_stats->qp[i].rx_qp_bytes);
1383 		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
1384 			hw->qp_last[i].rx_qp_mc_packets,
1385 			hw_stats->qp[i].rx_qp_mc_packets);
1386 		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
1387 			hw->qp_last[i].rx_qp_bc_packets,
1388 			hw_stats->qp[i].rx_qp_bc_packets);
1389 	}
1390 
1391 	for (i = 0; i < hw->nb_tx_queues; i++) {
1392 		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
1393 			hw->qp_last[i].tx_qp_packets,
1394 			hw_stats->qp[i].tx_qp_packets);
1395 		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
1396 			hw->qp_last[i].tx_qp_bytes,
1397 			hw_stats->qp[i].tx_qp_bytes);
1398 		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
1399 			hw->qp_last[i].tx_qp_mc_packets,
1400 			hw_stats->qp[i].tx_qp_mc_packets);
1401 		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
1402 			hw->qp_last[i].tx_qp_bc_packets,
1403 			hw_stats->qp[i].tx_qp_bc_packets);
1404 	}
1405 
1406 	/* PB Stats */
1407 	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
1408 	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
1409 	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
1410 	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
1411 	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
1412 	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);
1413 
1414 	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
1415 	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);
1416 
1417 	/* DMA Stats */
1418 	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
1419 	hw_stats->tx_dma_drop += rd32(hw, NGBE_DMATXDROP);
1420 	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
1421 	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
1422 	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
1423 	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
1424 	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);
1425 
1426 	/* MAC Stats */
1427 	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
1428 	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
1429 	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);
1430 
1431 	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
1432 	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
1433 	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);
1434 
1435 	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
1436 	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);
1437 
1438 	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
1439 	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
1440 	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
1441 	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
1442 	hw_stats->rx_size_512_to_1023_packets +=
1443 			rd64(hw, NGBE_MACRX512TO1023L);
1444 	hw_stats->rx_size_1024_to_max_packets +=
1445 			rd64(hw, NGBE_MACRX1024TOMAXL);
1446 	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
1447 	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
1448 	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
1449 	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
1450 	hw_stats->tx_size_512_to_1023_packets +=
1451 			rd64(hw, NGBE_MACTX512TO1023L);
1452 	hw_stats->tx_size_1024_to_max_packets +=
1453 			rd64(hw, NGBE_MACTX1024TOMAXL);
1454 
1455 	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
1456 	hw_stats->rx_oversize_cnt += rd32(hw, NGBE_MACRXOVERSIZE);
1457 	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);
1458 
1459 	/* MNG Stats */
1460 	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
1461 	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
1462 	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
1463 	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);
1464 
1465 	/* MACsec Stats */
1466 	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
1467 	hw_stats->tx_macsec_pkts_encrypted +=
1468 			rd32(hw, NGBE_LSECTX_ENCPKT);
1469 	hw_stats->tx_macsec_pkts_protected +=
1470 			rd32(hw, NGBE_LSECTX_PROTPKT);
1471 	hw_stats->tx_macsec_octets_encrypted +=
1472 			rd32(hw, NGBE_LSECTX_ENCOCT);
1473 	hw_stats->tx_macsec_octets_protected +=
1474 			rd32(hw, NGBE_LSECTX_PROTOCT);
1475 	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
1476 	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
1477 	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
1478 	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
1479 	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
1480 	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
1481 	hw_stats->rx_macsec_sc_pkts_unchecked +=
1482 			rd32(hw, NGBE_LSECRX_UNCHKPKT);
1483 	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
1484 	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
1485 	for (i = 0; i < 2; i++) {
1486 		hw_stats->rx_macsec_sa_pkts_ok +=
1487 			rd32(hw, NGBE_LSECRX_OKPKT(i));
1488 		hw_stats->rx_macsec_sa_pkts_invalid +=
1489 			rd32(hw, NGBE_LSECRX_INVPKT(i));
1490 		hw_stats->rx_macsec_sa_pkts_notvalid +=
1491 			rd32(hw, NGBE_LSECRX_BADPKT(i));
1492 	}
1493 	for (i = 0; i < 4; i++) {
1494 		hw_stats->rx_macsec_sa_pkts_unusedsa +=
1495 			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
1496 		hw_stats->rx_macsec_sa_pkts_notusingsa +=
1497 			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
1498 	}
1499 	hw_stats->rx_total_missed_packets =
1500 			hw_stats->rx_up_dropped;
1501 }
1502 
1503 static int
1504 ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1505 {
1506 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1507 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1508 	struct ngbe_stat_mappings *stat_mappings =
1509 			NGBE_DEV_STAT_MAPPINGS(dev);
1510 	uint32_t i, j;
1511 
1512 	ngbe_read_stats_registers(hw, hw_stats);
1513 
1514 	if (stats == NULL)
1515 		return -EINVAL;
1516 
1517 	/* Fill out the rte_eth_stats statistics structure */
1518 	stats->ipackets = hw_stats->rx_packets;
1519 	stats->ibytes = hw_stats->rx_bytes;
1520 	stats->opackets = hw_stats->tx_packets;
1521 	stats->obytes = hw_stats->tx_bytes;
1522 
1523 	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
1524 	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
1525 	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
1526 	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
1527 	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
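	/* each RQSM/TQSM register packs several 8-bit queue-to-counter map
	 * fields; pick the field for queue i and fold counters mapped beyond
	 * RTE_ETHDEV_QUEUE_STAT_CNTRS back into range
	 */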
1528 	for (i = 0; i < NGBE_MAX_QP; i++) {
1529 		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
1530 		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
1531 		uint32_t q_map;
1532 
1533 		q_map = (stat_mappings->rqsm[n] >> offset)
1534 				& QMAP_FIELD_RESERVED_BITS_MASK;
1535 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1536 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1537 		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1538 		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1539 
1540 		q_map = (stat_mappings->tqsm[n] >> offset)
1541 				& QMAP_FIELD_RESERVED_BITS_MASK;
1542 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1543 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1544 		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1545 		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
1546 	}
1547 
1548 	/* Rx Errors */
1549 	stats->imissed  = hw_stats->rx_total_missed_packets +
1550 			  hw_stats->rx_dma_drop;
1551 	stats->ierrors  = hw_stats->rx_crc_errors +
1552 			  hw_stats->rx_mac_short_packet_dropped +
1553 			  hw_stats->rx_length_errors +
1554 			  hw_stats->rx_undersize_errors +
1555 			  hw_stats->rdb_drp_cnt +
1556 			  hw_stats->rx_illegal_byte_errors +
1557 			  hw_stats->rx_error_bytes +
1558 			  hw_stats->rx_fragment_errors;
1559 
1560 	/* Tx Errors */
1561 	stats->oerrors  = 0;
1562 	return 0;
1563 }
1564 
1565 static int
1566 ngbe_dev_stats_reset(struct rte_eth_dev *dev)
1567 {
1568 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1569 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1570 
1571 	/* HW registers are cleared on read */
1572 	hw->offset_loaded = 0;
1573 	ngbe_dev_stats_get(dev, NULL);
1574 	hw->offset_loaded = 1;
1575 
1576 	/* Reset software totals */
1577 	memset(hw_stats, 0, sizeof(*hw_stats));
1578 
1579 	return 0;
1580 }
1581 
1582 /* This function calculates the number of xstats based on the current config */
1583 static unsigned
1584 ngbe_xstats_calc_num(struct rte_eth_dev *dev)
1585 {
1586 	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1587 	return NGBE_NB_HW_STATS +
1588 	       NGBE_NB_QP_STATS * nb_queues;
1589 }
1590 
1591 static inline int
1592 ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
1593 {
1594 	int nb, st;
1595 
1596 	/* Extended stats from ngbe_hw_stats */
1597 	if (id < NGBE_NB_HW_STATS) {
1598 		snprintf(name, size, "[hw]%s",
1599 			rte_ngbe_stats_strings[id].name);
1600 		return 0;
1601 	}
1602 	id -= NGBE_NB_HW_STATS;
1603 
1604 	/* Queue Stats */
1605 	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1606 		nb = id / NGBE_NB_QP_STATS;
1607 		st = id % NGBE_NB_QP_STATS;
1608 		snprintf(name, size, "[q%u]%s", nb,
1609 			rte_ngbe_qp_strings[st].name);
1610 		return 0;
1611 	}
1612 	id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;
1613 
1614 	return -(int)(id + 1);
1615 }
1616 
1617 static inline int
1618 ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1619 {
1620 	int nb, st;
1621 
1622 	/* Extended stats from ngbe_hw_stats */
1623 	if (id < NGBE_NB_HW_STATS) {
1624 		*offset = rte_ngbe_stats_strings[id].offset;
1625 		return 0;
1626 	}
1627 	id -= NGBE_NB_HW_STATS;
1628 
1629 	/* Queue Stats */
1630 	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1631 		nb = id / NGBE_NB_QP_STATS;
1632 		st = id % NGBE_NB_QP_STATS;
1633 		*offset = rte_ngbe_qp_strings[st].offset +
1634 			nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
1635 		return 0;
1636 	}
1637 
1638 	return -1;
1639 }
1640 
1641 static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1642 	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1643 {
1644 	unsigned int i, count;
1645 
1646 	count = ngbe_xstats_calc_num(dev);
1647 	if (xstats_names == NULL)
1648 		return count;
1649 
1650 	/* Note: limit >= cnt_stats checked upstream
1651 	 * in rte_eth_xstats_names()
1652 	 */
1653 	limit = min(limit, count);
1654 
1655 	/* Extended stats from ngbe_hw_stats */
1656 	for (i = 0; i < limit; i++) {
1657 		if (ngbe_get_name_by_id(i, xstats_names[i].name,
1658 			sizeof(xstats_names[i].name))) {
1659 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1660 			break;
1661 		}
1662 	}
1663 
1664 	return i;
1665 }
1666 
1667 static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1668 	const uint64_t *ids,
1669 	struct rte_eth_xstat_name *xstats_names,
1670 	unsigned int limit)
1671 {
1672 	unsigned int i;
1673 
1674 	if (ids == NULL)
1675 		return ngbe_dev_xstats_get_names(dev, xstats_names, limit);
1676 
1677 	for (i = 0; i < limit; i++) {
1678 		if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
1679 				sizeof(xstats_names[i].name))) {
1680 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1681 			return -1;
1682 		}
1683 	}
1684 
1685 	return i;
1686 }
1687 
1688 static int
1689 ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1690 					 unsigned int limit)
1691 {
1692 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1693 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1694 	unsigned int i, count;
1695 
1696 	ngbe_read_stats_registers(hw, hw_stats);
1697 
1698 	/* If this is a reset, xstats is NULL and we have cleared the
1699 	 * registers by reading them.
1700 	 */
1701 	count = ngbe_xstats_calc_num(dev);
1702 	if (xstats == NULL)
1703 		return count;
1704 
1705 	limit = min(limit, ngbe_xstats_calc_num(dev));
1706 
1707 	/* Extended stats from ngbe_hw_stats */
1708 	for (i = 0; i < limit; i++) {
1709 		uint32_t offset = 0;
1710 
1711 		if (ngbe_get_offset_by_id(i, &offset)) {
1712 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1713 			break;
1714 		}
1715 		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1716 		xstats[i].id = i;
1717 	}
1718 
1719 	return i;
1720 }
1721 
1722 static int
1723 ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1724 					 unsigned int limit)
1725 {
1726 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1727 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1728 	unsigned int i, count;
1729 
1730 	ngbe_read_stats_registers(hw, hw_stats);
1731 
1732 	/* If this is a reset, xstats is NULL and we have cleared the
1733 	 * registers by reading them.
1734 	 */
1735 	count = ngbe_xstats_calc_num(dev);
1736 	if (values == NULL)
1737 		return count;
1738 
1739 	limit = min(limit, ngbe_xstats_calc_num(dev));
1740 
1741 	/* Extended stats from ngbe_hw_stats */
1742 	for (i = 0; i < limit; i++) {
1743 		uint32_t offset;
1744 
1745 		if (ngbe_get_offset_by_id(i, &offset)) {
1746 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1747 			break;
1748 		}
1749 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1750 	}
1751 
1752 	return i;
1753 }
1754 
1755 static int
1756 ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1757 		uint64_t *values, unsigned int limit)
1758 {
1759 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1760 	unsigned int i;
1761 
1762 	if (ids == NULL)
1763 		return ngbe_dev_xstats_get_(dev, values, limit);
1764 
1765 	for (i = 0; i < limit; i++) {
1766 		uint32_t offset;
1767 
1768 		if (ngbe_get_offset_by_id(ids[i], &offset)) {
1769 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1770 			break;
1771 		}
1772 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1773 	}
1774 
1775 	return i;
1776 }
1777 
1778 static int
1779 ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
1780 {
1781 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1782 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1783 
1784 	/* HW registers are cleared on read */
1785 	hw->offset_loaded = 0;
1786 	ngbe_read_stats_registers(hw, hw_stats);
1787 	hw->offset_loaded = 1;
1788 
1789 	/* Reset software totals */
1790 	memset(hw_stats, 0, sizeof(*hw_stats));
1791 
1792 	return 0;
1793 }
1794 
1795 static int
1796 ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1797 {
1798 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1799 	int ret;
1800 
1801 	ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
1802 
1803 	if (ret < 0)
1804 		return -EINVAL;
1805 
1806 	ret += 1; /* add the size of '\0' */
1807 	if (fw_size < (size_t)ret)
1808 		return ret;
1809 
1810 	return 0;
1811 }
1812 
1813 static int
1814 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1815 {
1816 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1817 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1818 
1819 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1820 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1821 	dev_info->min_rx_bufsize = 1024;
1822 	dev_info->max_rx_pktlen = NGBE_MAX_MTU + NGBE_ETH_OVERHEAD;
1823 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
1824 	dev_info->max_mtu = NGBE_MAX_MTU;
1825 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1826 	dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
1827 	dev_info->max_vfs = pci_dev->max_vfs;
1828 	dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1829 	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1830 				     dev_info->rx_queue_offload_capa);
1831 	dev_info->tx_queue_offload_capa = 0;
1832 	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1833 
1834 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1835 		.rx_thresh = {
1836 			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
1837 			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
1838 			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
1839 		},
1840 		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1841 		.rx_drop_en = 0,
1842 		.offloads = 0,
1843 	};
1844 
1845 	dev_info->default_txconf = (struct rte_eth_txconf) {
1846 		.tx_thresh = {
1847 			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
1848 			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
1849 			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
1850 		},
1851 		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1852 		.offloads = 0,
1853 	};
1854 
1855 	dev_info->rx_desc_lim = rx_desc_lim;
1856 	dev_info->tx_desc_lim = tx_desc_lim;
1857 
1858 	dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
1859 	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
1860 	dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;
1861 
1862 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1863 				RTE_ETH_LINK_SPEED_10M;
1864 
1865 	/* Driver-preferred Rx/Tx parameters */
1866 	dev_info->default_rxportconf.burst_size = 32;
1867 	dev_info->default_txportconf.burst_size = 32;
1868 	dev_info->default_rxportconf.nb_queues = 1;
1869 	dev_info->default_txportconf.nb_queues = 1;
1870 	dev_info->default_rxportconf.ring_size = 256;
1871 	dev_info->default_txportconf.ring_size = 256;
1872 
1873 	return 0;
1874 }
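
/*
 * Illustrative sketch (not part of the driver): the limits and defaults
 * filled in above are what an application sees through the ethdev API.
 * port_id is an assumed probed ngbe port:
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0)
 *		printf("max rxq %u, preferred ring size %u\n",
 *		       info.max_rx_queues,
 *		       info.default_rxportconf.ring_size);
 */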
1875 
1876 const uint32_t *
1877 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
1878 {
1879 	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1880 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)
1881 	    dev->rx_pkt_burst == ngbe_recv_pkts_vec ||
1882 	    dev->rx_pkt_burst == ngbe_recv_scattered_pkts_vec ||
1883 #endif
1884 	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1885 	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1886 	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1887 		return ngbe_get_supported_ptypes(no_of_elements);
1888 
1889 	return NULL;
1890 }
1891 
1892 static void
1893 ngbe_dev_overheat(struct rte_eth_dev *dev)
1894 {
1895 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1896 	s32 temp_state;
1897 
1898 	temp_state = hw->mac.check_overtemp(hw);
1899 	if (!temp_state)
1900 		return;
1901 
1902 	if (temp_state == NGBE_ERR_UNDERTEMP) {
1903 		PMD_DRV_LOG(CRIT, "Network adapter has been started again, "
1904 			"since the temperature has returned to normal.");
1905 		wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, NGBE_PBRXCTL_ENA);
1906 		ngbe_dev_set_link_up(dev);
1907 	} else if (temp_state == NGBE_ERR_OVERTEMP) {
1908 		PMD_DRV_LOG(CRIT, "Network adapter has been stopped because it has overheated.");
1909 		wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0);
1910 		ngbe_dev_set_link_down(dev);
1911 	}
1912 }
1913 
1914 /* Return 0 if the link status changed, -1 if it did not change */
1915 int
1916 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1917 			    int wait_to_complete)
1918 {
1919 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1920 	struct rte_eth_link link;
1921 	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1922 	u32 lan_speed = 0;
1923 	bool link_up;
1924 	int err;
1925 	int wait = 1;
1926 
1927 	memset(&link, 0, sizeof(link));
1928 	link.link_status = RTE_ETH_LINK_DOWN;
1929 	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1930 	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1931 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1932 			~RTE_ETH_LINK_SPEED_AUTONEG);
1933 
1934 	hw->mac.get_link_status = true;
1935 
1936 	/* Don't wait for completion if it wasn't requested or the LSC interrupt is enabled */
1937 	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1938 		wait = 0;
1939 
1940 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1941 	if (err != 0) {
1942 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1943 		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1944 		return rte_eth_linkstatus_set(dev, &link);
1945 	}
1946 
1947 	if (!link_up)
1948 		return rte_eth_linkstatus_set(dev, &link);
1949 
1950 	link.link_status = RTE_ETH_LINK_UP;
1951 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1952 
1953 	switch (link_speed) {
1954 	default:
1955 	case NGBE_LINK_SPEED_UNKNOWN:
1956 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1957 		break;
1958 
1959 	case NGBE_LINK_SPEED_10M_FULL:
1960 		link.link_speed = RTE_ETH_SPEED_NUM_10M;
1961 		lan_speed = 0;
1962 		break;
1963 
1964 	case NGBE_LINK_SPEED_100M_FULL:
1965 		link.link_speed = RTE_ETH_SPEED_NUM_100M;
1966 		lan_speed = 1;
1967 		break;
1968 
1969 	case NGBE_LINK_SPEED_1GB_FULL:
1970 		link.link_speed = RTE_ETH_SPEED_NUM_1G;
1971 		lan_speed = 2;
1972 		break;
1973 	}
1974 
1975 	if (hw->is_pf) {
1976 		wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1977 		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1978 				NGBE_LINK_SPEED_100M_FULL |
1979 				NGBE_LINK_SPEED_10M_FULL)) {
1980 			wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1981 				NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1982 		}
1983 		wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_PROMISC,
1984 			NGBE_MACRXFLT_PROMISC);
1985 	}
1986 
1987 	return rte_eth_linkstatus_set(dev, &link);
1988 }
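
/*
 * Illustrative sketch (not part of the driver): applications usually
 * consume the link state set above through the non-blocking ethdev
 * call. port_id is an assumed started port:
 *
 *	struct rte_eth_link link;
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
 *	    link.link_status == RTE_ETH_LINK_UP)
 *		printf("link up at %u Mbps\n", link.link_speed);
 */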
1989 
1990 static int
1991 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1992 {
1993 	return ngbe_dev_link_update_share(dev, wait_to_complete);
1994 }
1995 
1996 static int
1997 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1998 {
1999 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2000 	uint32_t fctrl;
2001 
2002 	fctrl = rd32(hw, NGBE_PSRCTL);
2003 	fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
2004 	wr32(hw, NGBE_PSRCTL, fctrl);
2005 
2006 	return 0;
2007 }
2008 
2009 static int
2010 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2011 {
2012 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2013 	uint32_t fctrl;
2014 
2015 	fctrl = rd32(hw, NGBE_PSRCTL);
2016 	fctrl &= (~NGBE_PSRCTL_UCP);
2017 	if (dev->data->all_multicast == 1)
2018 		fctrl |= NGBE_PSRCTL_MCP;
2019 	else
2020 		fctrl &= (~NGBE_PSRCTL_MCP);
2021 	wr32(hw, NGBE_PSRCTL, fctrl);
2022 
2023 	return 0;
2024 }
2025 
2026 static int
2027 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2028 {
2029 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2030 	uint32_t fctrl;
2031 
2032 	fctrl = rd32(hw, NGBE_PSRCTL);
2033 	fctrl |= NGBE_PSRCTL_MCP;
2034 	wr32(hw, NGBE_PSRCTL, fctrl);
2035 
2036 	return 0;
2037 }
2038 
2039 static int
2040 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2041 {
2042 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2043 	uint32_t fctrl;
2044 
2045 	if (dev->data->promiscuous == 1)
2046 		return 0; /* must remain in all_multicast mode */
2047 
2048 	fctrl = rd32(hw, NGBE_PSRCTL);
2049 	fctrl &= (~NGBE_PSRCTL_MCP);
2050 	wr32(hw, NGBE_PSRCTL, fctrl);
2051 
2052 	return 0;
2053 }
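
/*
 * Illustrative sketch (not part of the driver): the handlers above keep
 * MCP set while promiscuous mode is active, so the usual application
 * sequence below behaves as expected. port_id is an assumed port:
 *
 *	rte_eth_promiscuous_enable(port_id);
 *	rte_eth_allmulticast_enable(port_id);
 *	rte_eth_allmulticast_disable(port_id); // no-op while promiscuous
 *	rte_eth_promiscuous_disable(port_id);
 */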
2054 
2055 /**
2056  * It clears the interrupt causes and enables the interrupt.
2057  * It is called only once, during NIC initialization.
2058  *
2059  * @param dev
2060  *  Pointer to struct rte_eth_dev.
2061  * @param on
2062  *  Enable or Disable.
2063  *
2064  * @return
2065  *  - On success, zero.
2066  *  - On failure, a negative value.
2067  */
2068 static int
2069 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2070 {
2071 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2072 
2073 	ngbe_dev_link_status_print(dev);
2074 	if (on != 0) {
2075 		intr->mask_misc |= NGBE_ICRMISC_PHY;
2076 		intr->mask_misc |= NGBE_ICRMISC_GPIO;
2077 	} else {
2078 		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2079 		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
2080 	}
2081 
2082 	return 0;
2083 }
2084 
2085 /**
2086  * It clears the interrupt causes and enables the interrupt.
2087  * It is called only once, during NIC initialization.
2088  *
2089  * @param dev
2090  *  Pointer to struct rte_eth_dev.
2091  *
2092  * @return
2093  *  - On success, zero.
2094  *  - On failure, a negative value.
2095  */
2096 static int
2097 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
2098 {
2099 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2100 	u64 mask;
2101 
2102 	mask = NGBE_ICR_MASK;
2103 	mask &= (1ULL << NGBE_MISC_VEC_ID);
2104 	intr->mask |= mask;
2105 	intr->mask_misc |= NGBE_ICRMISC_GPIO;
2106 
2107 	return 0;
2108 }
2109 
2110 /**
2111  * It clears the interrupt causes and enables the interrupt.
2112  * It is called only once, during NIC initialization.
2113  *
2114  * @param dev
2115  *  Pointer to struct rte_eth_dev.
2116  *
2117  * @return
2118  *  - On success, zero.
2119  *  - On failure, a negative value.
2120  */
2121 static int
2122 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2123 {
2124 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2125 	u64 mask;
2126 
2127 	mask = NGBE_ICR_MASK;
2128 	mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
2129 	intr->mask |= mask;
2130 
2131 	return 0;
2132 }
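
/*
 * Worked example (assuming NGBE_RX_VEC_START == 1): the mask logic
 * above clears the bits reserved for the misc vector, e.g.
 *
 *	(1ULL << 1) - 1      = 0x1    (bits below the Rx vectors)
 *	~0x1                 = 0x...fffe
 *	NGBE_ICR_MASK & ~0x1 keeps only the queue interrupt bits
 */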
2133 
2134 /**
2135  * It clears the interrupt causes and enables the interrupt.
2136  * It is called only once, during NIC initialization.
2137  *
2138  * @param dev
2139  *  Pointer to struct rte_eth_dev.
2140  *
2141  * @return
2142  *  - On success, zero.
2143  *  - On failure, a negative value.
2144  */
2145 static int
2146 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2147 {
2148 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2149 
2150 	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
2151 
2152 	return 0;
2153 }
2154 
2155 /*
2156  * It reads the ICR and sets a flag for the link update.
2157  *
2158  * @param dev
2159  *  Pointer to struct rte_eth_dev.
2160  *
2161  * @return
2162  *  - On success, zero.
2163  *  - On failure, a negative value.
2164  */
2165 static int
2166 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2167 {
2168 	uint32_t eicr;
2169 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2170 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2171 
2172 	/* read-on-clear nic registers here */
2173 	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2174 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2175 
2176 	intr->flags = 0;
2177 
2178 	/* set flag for async link update */
2179 	if (eicr & NGBE_ICRMISC_PHY)
2180 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2181 
2182 	if (eicr & NGBE_ICRMISC_VFMBX)
2183 		intr->flags |= NGBE_FLAG_MAILBOX;
2184 
2185 	if (eicr & NGBE_ICRMISC_LNKSEC)
2186 		intr->flags |= NGBE_FLAG_MACSEC;
2187 
2188 	if (eicr & NGBE_ICRMISC_GPIO)
2189 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2190 
2191 	if (eicr & NGBE_ICRMISC_HEAT)
2192 		intr->flags |= NGBE_FLAG_OVERHEAT;
2193 
2194 	((u32 *)hw->isb_mem)[NGBE_ISB_MISC] = 0;
2195 
2196 	return 0;
2197 }
2198 
2199 /**
2200  * It gets and then prints the link status.
2201  *
2202  * @param dev
2203  *  Pointer to struct rte_eth_dev.
2208  */
2209 static void
2210 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
2211 {
2212 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2213 	struct rte_eth_link link;
2214 
2215 	rte_eth_linkstatus_get(dev, &link);
2216 
2217 	if (link.link_status == RTE_ETH_LINK_UP) {
2218 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2219 					(int)(dev->data->port_id),
2220 					(unsigned int)link.link_speed,
2221 			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2222 					"full-duplex" : "half-duplex");
2223 	} else {
2224 		PMD_INIT_LOG(INFO, "Port %d: Link Down",
2225 				(int)(dev->data->port_id));
2226 	}
2227 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2228 				pci_dev->addr.domain,
2229 				pci_dev->addr.bus,
2230 				pci_dev->addr.devid,
2231 				pci_dev->addr.function);
2232 }
2233 
2234 /*
2235  * It executes link_update and related actions after an interrupt has occurred.
2236  *
2237  * @param dev
2238  *  Pointer to struct rte_eth_dev.
2239  *
2240  * @return
2241  *  - On success, zero.
2242  *  - On failure, a negative value.
2243  */
2244 static int
2245 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2246 {
2247 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2248 
2249 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2250 
2251 	if (intr->flags & NGBE_FLAG_MAILBOX) {
2252 		ngbe_pf_mbx_process(dev);
2253 		intr->flags &= ~NGBE_FLAG_MAILBOX;
2254 	}
2255 
2256 	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2257 		struct rte_eth_link link;
2258 
2259 		/* Get the link status before the update, to detect a change later */
2260 		rte_eth_linkstatus_get(dev, &link);
2261 
2262 		ngbe_dev_link_update(dev, 0);
2263 		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2264 		ngbe_dev_link_status_print(dev);
2265 		if (dev->data->dev_link.link_speed != link.link_speed)
2266 			rte_eth_dev_callback_process(dev,
2267 				RTE_ETH_EVENT_INTR_LSC, NULL);
2268 	}
2269 
2270 	if (intr->flags & NGBE_FLAG_OVERHEAT) {
2271 		ngbe_dev_overheat(dev);
2272 		intr->flags &= ~NGBE_FLAG_OVERHEAT;
2273 	}
2274 
2275 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
2276 	ngbe_enable_intr(dev);
2277 
2278 	return 0;
2279 }
2280 
2281 /**
2282  * Interrupt handler triggered by the NIC for handling a
2283  * specific interrupt.
2284  *
2285  * @param param
2286  *  The address of parameter (struct rte_eth_dev *) registered before.
2287  */
2288 static void
2289 ngbe_dev_interrupt_handler(void *param)
2290 {
2291 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2292 
2293 	ngbe_dev_interrupt_get_status(dev);
2294 	ngbe_dev_interrupt_action(dev);
2295 }
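
/*
 * Illustrative sketch (not part of this function): the handler above is
 * the kind of callback that gets attached to the device interrupt
 * handle at init time, roughly:
 *
 *	rte_intr_callback_register(pci_dev->intr_handle,
 *				   ngbe_dev_interrupt_handler, eth_dev);
 */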
2296 
2297 static int
2298 ngbe_dev_led_on(struct rte_eth_dev *dev)
2299 {
2300 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2301 	return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP;
2302 }
2303 
2304 static int
2305 ngbe_dev_led_off(struct rte_eth_dev *dev)
2306 {
2307 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2308 	return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP;
2309 }
2310 
2311 static int
2312 ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2313 {
2314 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2315 	uint32_t mflcn_reg;
2316 	uint32_t fccfg_reg;
2317 	int rx_pause;
2318 	int tx_pause;
2319 
2320 	fc_conf->pause_time = hw->fc.pause_time;
2321 	fc_conf->high_water = hw->fc.high_water;
2322 	fc_conf->low_water = hw->fc.low_water;
2323 	fc_conf->send_xon = hw->fc.send_xon;
2324 	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2325 
2326 	/*
2327 	 * Return the rx_pause status according to the actual setting of
2328 	 * the RXFCCFG register.
2329 	 */
2330 	mflcn_reg = rd32(hw, NGBE_RXFCCFG);
2331 	if (mflcn_reg & NGBE_RXFCCFG_FC)
2332 		rx_pause = 1;
2333 	else
2334 		rx_pause = 0;
2335 
2336 	/*
2337 	 * Return the tx_pause status according to the actual setting of
2338 	 * the TXFCCFG register.
2339 	 */
2340 	fccfg_reg = rd32(hw, NGBE_TXFCCFG);
2341 	if (fccfg_reg & NGBE_TXFCCFG_FC)
2342 		tx_pause = 1;
2343 	else
2344 		tx_pause = 0;
2345 
2346 	if (rx_pause && tx_pause)
2347 		fc_conf->mode = RTE_ETH_FC_FULL;
2348 	else if (rx_pause)
2349 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2350 	else if (tx_pause)
2351 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2352 	else
2353 		fc_conf->mode = RTE_ETH_FC_NONE;
2354 
2355 	return 0;
2356 }
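
/*
 * Illustrative sketch (not part of the driver): reading the flow
 * control mode back through the ethdev API. port_id is an assumed port:
 *
 *	struct rte_eth_fc_conf fc;
 *
 *	if (rte_eth_dev_flow_ctrl_get(port_id, &fc) == 0 &&
 *	    fc.mode == RTE_ETH_FC_FULL)
 *		printf("pause frames enabled in both directions\n");
 */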
2357 
2358 static int
2359 ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2360 {
2361 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2362 	int err;
2363 	uint32_t rx_buf_size;
2364 	uint32_t max_high_water;
2365 	enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = {
2366 		ngbe_fc_none,
2367 		ngbe_fc_rx_pause,
2368 		ngbe_fc_tx_pause,
2369 		ngbe_fc_full
2370 	};
2371 
2372 	PMD_INIT_FUNC_TRACE();
2373 
2374 	rx_buf_size = rd32(hw, NGBE_PBRXSIZE);
2375 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2376 
2377 	/*
2378 	 * Reserve at least one Ethernet frame for the high_water/low_water
2379 	 * marks, which are expressed in kilobytes.
2380 	 */
2381 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
2382 	if (fc_conf->high_water > max_high_water ||
2383 	    fc_conf->high_water < fc_conf->low_water) {
2384 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2385 		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
2386 		return -EINVAL;
2387 	}
2388 
2389 	hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode];
2390 	hw->fc.pause_time     = fc_conf->pause_time;
2391 	hw->fc.high_water     = fc_conf->high_water;
2392 	hw->fc.low_water      = fc_conf->low_water;
2393 	hw->fc.send_xon       = fc_conf->send_xon;
2394 	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
2395 
2396 	err = hw->mac.fc_enable(hw);
2397 
2398 	/* Not negotiated is not an error case */
2399 	if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) {
2400 		wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK,
2401 		      (fc_conf->mac_ctrl_frame_fwd
2402 		       ? NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP));
2403 		ngbe_flush(hw);
2404 
2405 		return 0;
2406 	}
2407 
2408 	PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err);
2409 	return -EIO;
2410 }
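
/*
 * Illustrative sketch (not part of the driver): a minimal flow control
 * setup through the ethdev API. The watermark and pause-time values are
 * hypothetical and must pass the max_high_water check above. port_id is
 * an assumed port:
 *
 *	struct rte_eth_fc_conf fc = {
 *		.mode = RTE_ETH_FC_FULL,
 *		.high_water = 0x10,	// KB, hypothetical
 *		.low_water = 0x8,	// KB, hypothetical
 *		.pause_time = 0x680,	// hypothetical
 *		.send_xon = 1,
 *		.autoneg = 1,
 *	};
 *
 *	rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */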
2411 
2412 /* Additional bittime to account for NGBE framing */
2413 #define NGBE_ETH_FRAMING 20
2414 
2415 /*
2416  * ngbe_fc_hpbthresh_set - calculate the high water mark for flow control
2417  *
2418  * @dev: pointer to the ethdev structure
2420  */
2421 static s32
2422 ngbe_fc_hpbthresh_set(struct rte_eth_dev *dev)
2423 {
2424 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2425 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2426 	u32 max_frame_size, tc, dv_id, rx_pb;
2427 	s32 kb, marker;
2428 
2429 	/* Calculate max LAN frame size */
2430 	max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK);
2431 	tc = max_frame_size + NGBE_ETH_FRAMING;
2432 
2433 	/* Calculate delay value for device */
2434 	dv_id = NGBE_DV(tc, tc);
2435 
2436 	/* Loopback switch introduces additional latency */
2437 	if (pci_dev->max_vfs)
2438 		dv_id += NGBE_B2BT(tc);
2439 
2440 	/* Delay value is calculated in bit times; convert it to KB */
2441 	kb = NGBE_BT2KB(dv_id);
2442 	rx_pb = rd32(hw, NGBE_PBRXSIZE) >> 10;
2443 
2444 	marker = rx_pb - kb;
2445 
2446 	/* It is possible that the packet buffer is not large enough
2447 	 * to provide the required headroom. In this case warn the
2448 	 * user and do the best we can.
2449 	 */
2450 	if (marker < 0) {
2451 		PMD_DRV_LOG(WARNING, "Packet Buffer can not provide enough headroom to support flow control.");
2452 		marker = tc + 1;
2453 	}
2454 
2455 	return marker;
2456 }
2457 
2458 /*
2459  * ngbe_fc_lpbthresh_set - calculate the low water mark for flow control
2460  *
2461  * @dev: pointer to the ethdev structure
2462  */
2463 static s32
2464 ngbe_fc_lpbthresh_set(struct rte_eth_dev *dev)
2465 {
2466 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2467 	u32 max_frame_size, tc, dv_id;
2468 	s32 kb;
2469 
2470 	/* Calculate max LAN frame size */
2471 	max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK);
2472 	tc = max_frame_size + NGBE_ETH_FRAMING;
2473 
2474 	/* Calculate delay value for device */
2475 	dv_id = NGBE_LOW_DV(tc);
2476 
2477 	/* Delay value is calculated in bit times; convert it to KB */
2478 	kb = NGBE_BT2KB(dv_id);
2479 
2480 	return kb;
2481 }
2482 
2483 /*
2484  * ngbe_pbthresh_set - calculate and set up the high and low water marks
2485  */
2486 static void
2487 ngbe_pbthresh_set(struct rte_eth_dev *dev)
2488 {
2489 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2490 
2491 	hw->fc.high_water = ngbe_fc_hpbthresh_set(dev);
2492 	hw->fc.low_water = ngbe_fc_lpbthresh_set(dev);
2493 
2494 	/* Low water marks must not be larger than high water marks */
2495 	if (hw->fc.low_water > hw->fc.high_water)
2496 		hw->fc.low_water = 0;
2497 }
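
/*
 * Hypothetical worked example: with a 32 KB Rx packet buffer and a
 * delay equivalent of 8 KB, ngbe_fc_hpbthresh_set() above yields
 * 32 - 8 = 24 KB for the high water mark; the low water mark depends
 * only on the frame size, and is forced to 0 here if it ever exceeds
 * the high mark.
 */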
2498 
2499 int
2500 ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2501 			  struct rte_eth_rss_reta_entry64 *reta_conf,
2502 			  uint16_t reta_size)
2503 {
2504 	uint8_t i, j, mask;
2505 	uint32_t reta;
2506 	uint16_t idx, shift;
2507 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2508 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2509 
2510 	PMD_INIT_FUNC_TRACE();
2511 
2512 	if (!hw->is_pf) {
2513 		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
2514 			"NIC.");
2515 		return -ENOTSUP;
2516 	}
2517 
2518 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2519 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2520 			"(%d) doesn't match the number the hardware supports "
2521 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2522 		return -EINVAL;
2523 	}
2524 
2525 	for (i = 0; i < reta_size; i += 4) {
2526 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2527 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2528 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2529 		if (!mask)
2530 			continue;
2531 
2532 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2533 		for (j = 0; j < 4; j++) {
2534 			if (RS8(mask, j, 0x1)) {
2535 				reta  &= ~(MS32(8 * j, 0xFF));
2536 				reta |= LS32(reta_conf[idx].reta[shift + j],
2537 						8 * j, 0xFF);
2538 			}
2539 		}
2540 		wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2541 	}
2542 	adapter->rss_reta_updated = 1;
2543 
2544 	return 0;
2545 }
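
/*
 * Illustrative sketch (not part of the driver): spreading all 128 RETA
 * entries across two queues through the ethdev API. port_id is an
 * assumed port:
 *
 *	struct rte_eth_rss_reta_entry64 conf[2];
 *	uint16_t i;
 *
 *	memset(conf, 0, sizeof(conf));
 *	for (i = 0; i < 128; i++) {
 *		conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
 *		conf[i / RTE_ETH_RETA_GROUP_SIZE]
 *			.reta[i % RTE_ETH_RETA_GROUP_SIZE] = i % 2;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, conf, 128);
 */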
2546 
2547 int
2548 ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2549 			 struct rte_eth_rss_reta_entry64 *reta_conf,
2550 			 uint16_t reta_size)
2551 {
2552 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2553 	uint8_t i, j, mask;
2554 	uint32_t reta;
2555 	uint16_t idx, shift;
2556 
2557 	PMD_INIT_FUNC_TRACE();
2558 
2559 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2560 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2561 			"(%d) doesn't match the number the hardware supports "
2562 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2563 		return -EINVAL;
2564 	}
2565 
2566 	for (i = 0; i < reta_size; i += 4) {
2567 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2568 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2569 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2570 		if (!mask)
2571 			continue;
2572 
2573 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2574 		for (j = 0; j < 4; j++) {
2575 			if (RS8(mask, j, 0x1))
2576 				reta_conf[idx].reta[shift + j] =
2577 					(uint16_t)RS32(reta, 8 * j, 0xFF);
2578 		}
2579 	}
2580 
2581 	return 0;
2582 }
2583 
2584 static int
2585 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2586 				uint32_t index, uint32_t pool)
2587 {
2588 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2589 	uint32_t enable_addr = 1;
2590 
2591 	return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2592 			     pool, enable_addr);
2593 }
2594 
2595 static void
2596 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2597 {
2598 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2599 
2600 	ngbe_clear_rar(hw, index);
2601 }
2602 
2603 static int
2604 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2605 {
2606 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2607 
2608 	ngbe_remove_rar(dev, 0);
2609 	ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2610 
2611 	return 0;
2612 }
2613 
2614 static int
2615 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2616 {
2617 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2618 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
2619 	struct rte_eth_dev_data *dev_data = dev->data;
2620 
2621 	/* If the device is started, refuse an MTU that requires scattered
2622 	 * packet support when this feature has not been enabled before.
2623 	 */
2624 	if (dev_data->dev_started && !dev_data->scattered_rx &&
2625 	    (frame_size + 2 * RTE_VLAN_HLEN >
2626 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2627 		PMD_INIT_LOG(ERR, "Stop port first.");
2628 		return -EINVAL;
2629 	}
2630 
2631 	wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2632 		NGBE_FRMSZ_MAX(frame_size));
2633 
2634 	return 0;
2635 }
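
/*
 * Illustrative sketch (not part of the driver): per the check above, a
 * jumbo MTU that would require scattered Rx must be set before the port
 * is started. port_id is an assumed port:
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *		printf("stop the port before enabling jumbo frames\n");
 */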
2636 
2637 static uint32_t
2638 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2639 {
2640 	uint32_t vector = 0;
2641 
2642 	switch (hw->mac.mc_filter_type) {
2643 	case 0:   /* use bits [47:36] of the address */
2644 		vector = ((uc_addr->addr_bytes[4] >> 4) |
2645 			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
2646 		break;
2647 	case 1:   /* use bits [46:35] of the address */
2648 		vector = ((uc_addr->addr_bytes[4] >> 3) |
2649 			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
2650 		break;
2651 	case 2:   /* use bits [45:34] of the address */
2652 		vector = ((uc_addr->addr_bytes[4] >> 2) |
2653 			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
2654 		break;
2655 	case 3:   /* use bits [43:32] of the address */
2656 		vector = ((uc_addr->addr_bytes[4]) |
2657 			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
2658 		break;
2659 	default:  /* Invalid mc_filter_type */
2660 		break;
2661 	}
2662 
2663 	/* vector can only be 12 bits wide or the boundary will be exceeded */
2664 	vector &= 0xFFF;
2665 	return vector;
2666 }
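
/*
 * Worked example for filter type 0 (bits [47:36] of the address): with
 * addr_bytes[4] = 0xAB and addr_bytes[5] = 0xCD, the vector is
 * (0xAB >> 4) | (0xCD << 4) = 0x00A | 0xCD0 = 0xCDA, which stays within
 * the 12-bit limit enforced above.
 */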
2667 
2668 static int
2669 ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2670 			struct rte_ether_addr *mac_addr, uint8_t on)
2671 {
2672 	uint32_t vector;
2673 	uint32_t uta_idx;
2674 	uint32_t reg_val;
2675 	uint32_t uta_mask;
2676 	uint32_t psrctl;
2677 
2678 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2679 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2680 
2681 	vector = ngbe_uta_vector(hw, mac_addr);
2682 	uta_idx = (vector >> 5) & 0x7F;
2683 	uta_mask = 0x1UL << (vector & 0x1F);
2684 
2685 	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2686 		return 0;
2687 
2688 	reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2689 	if (on) {
2690 		uta_info->uta_in_use++;
2691 		reg_val |= uta_mask;
2692 		uta_info->uta_shadow[uta_idx] |= uta_mask;
2693 	} else {
2694 		uta_info->uta_in_use--;
2695 		reg_val &= ~uta_mask;
2696 		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2697 	}
2698 
2699 	wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2700 
2701 	psrctl = rd32(hw, NGBE_PSRCTL);
2702 	if (uta_info->uta_in_use > 0)
2703 		psrctl |= NGBE_PSRCTL_UCHFENA;
2704 	else
2705 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2706 
2707 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2708 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2709 	wr32(hw, NGBE_PSRCTL, psrctl);
2710 
2711 	return 0;
2712 }
2713 
2714 static int
2715 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2716 {
2717 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2718 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2719 	uint32_t psrctl;
2720 	int i;
2721 
2722 	if (on) {
2723 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2724 			uta_info->uta_shadow[i] = ~0;
2725 			wr32(hw, NGBE_UCADDRTBL(i), ~0);
2726 		}
2727 	} else {
2728 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2729 			uta_info->uta_shadow[i] = 0;
2730 			wr32(hw, NGBE_UCADDRTBL(i), 0);
2731 		}
2732 	}
2733 
2734 	psrctl = rd32(hw, NGBE_PSRCTL);
2735 	if (on)
2736 		psrctl |= NGBE_PSRCTL_UCHFENA;
2737 	else
2738 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2739 
2740 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2741 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2742 	wr32(hw, NGBE_PSRCTL, psrctl);
2743 
2744 	return 0;
2745 }
2746 
2747 static int
2748 ngbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2749 {
2750 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2751 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2752 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2753 	uint32_t mask;
2754 
2755 	mask = rd32(hw, NGBE_IMC(0));
2756 	mask |= (1 << queue_id);
2757 	wr32(hw, NGBE_IMC(0), mask);
2758 	rte_intr_enable(intr_handle);
2759 
2760 	return 0;
2761 }
2762 
2763 static int
2764 ngbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2765 {
2766 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2767 	uint32_t mask;
2768 
2769 	mask = rd32(hw, NGBE_IMS(0));
2770 	mask |= (1 << queue_id);
2771 	wr32(hw, NGBE_IMS(0), mask);
2772 
2773 	return 0;
2774 }
2775 
2776 /**
2777  * Set the IVAR registers, mapping interrupt causes to vectors
2778  * @param hw
2779  *  pointer to ngbe_hw struct
2780  * @direction
2781  *  0 for Rx, 1 for Tx, -1 for other causes
2782  * @queue
2783  *  queue to map the corresponding interrupt to
2784  * @msix_vector
2785  *  the vector to map to the corresponding queue
2786  */
2787 void
2788 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2789 		   uint8_t queue, uint8_t msix_vector)
2790 {
2791 	uint32_t tmp, idx;
2792 
2793 	if (direction == -1) {
2794 		/* other causes */
2795 		msix_vector |= NGBE_IVARMISC_VLD;
2796 		idx = 0;
2797 		tmp = rd32(hw, NGBE_IVARMISC);
2798 		tmp &= ~(0xFF << idx);
2799 		tmp |= (msix_vector << idx);
2800 		wr32(hw, NGBE_IVARMISC, tmp);
2801 	} else {
2802 		/* rx or tx causes */
2803 		msix_vector |= NGBE_IVAR_VLD; /* Workaround for ICR lost */
2804 		idx = ((16 * (queue & 1)) + (8 * direction));
2805 		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2806 		tmp &= ~(0xFF << idx);
2807 		tmp |= (msix_vector << idx);
2808 		wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2809 	}
2810 }
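
/*
 * Worked example: mapping Rx queue 3 (direction 0) gives
 * idx = 16 * (3 & 1) + 8 * 0 = 16, so the vector is written to bits
 * [23:16] of NGBE_IVAR(1), i.e. the Rx cause byte of the odd queue in
 * the second IVAR register.
 */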
2811 
2812 /**
2813  * Sets up the hardware to properly generate MSI-X interrupts
2814  * @param dev
2815  *  pointer to the ethdev structure
2816  */
2817 static void
2818 ngbe_configure_msix(struct rte_eth_dev *dev)
2819 {
2820 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2821 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2822 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2823 	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2824 	uint32_t vec = NGBE_MISC_VEC_ID;
2825 	uint32_t gpie;
2826 
2827 	/*
2828 	 * Don't configure the MSI-X registers if no mapping has been done
2829 	 * between interrupt vectors and event fds. However, if MSI-X has
2830 	 * already been enabled, auto clean, auto mask and throttling still
2831 	 * need to be configured.
2832 	 */
2833 	gpie = rd32(hw, NGBE_GPIE);
2834 	if (!rte_intr_dp_is_en(intr_handle) &&
2835 	    !(gpie & NGBE_GPIE_MSIX))
2836 		return;
2837 
2838 	if (rte_intr_allow_others(intr_handle)) {
2839 		base = NGBE_RX_VEC_START;
2840 		vec = base;
2841 	}
2842 
2843 	/* setup GPIE for MSI-X mode */
2844 	gpie = rd32(hw, NGBE_GPIE);
2845 	gpie |= NGBE_GPIE_MSIX;
2846 	wr32(hw, NGBE_GPIE, gpie);
2847 
2848 	/* Populate the IVAR table and set the ITR values to the
2849 	 * corresponding register.
2850 	 */
2851 	if (rte_intr_dp_is_en(intr_handle)) {
2852 		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2853 			queue_id++) {
2854 			/* by default, 1:1 mapping */
2855 			ngbe_set_ivar_map(hw, 0, queue_id, vec);
2856 			rte_intr_vec_list_index_set(intr_handle,
2857 							   queue_id, vec);
2858 			if (vec < base + rte_intr_nb_efd_get(intr_handle)
2859 			    - 1)
2860 				vec++;
2861 		}
2862 
2863 		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2864 	}
2865 	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2866 			NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2867 			| NGBE_ITR_WRDSA);
2868 }
2869 
2870 static u8 *
2871 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2872 			u8 **mc_addr_ptr, u32 *vmdq)
2873 {
2874 	u8 *mc_addr;
2875 
2876 	*vmdq = 0;
2877 	mc_addr = *mc_addr_ptr;
2878 	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2879 	return mc_addr;
2880 }
2881 
2882 int
2883 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2884 			  struct rte_ether_addr *mc_addr_set,
2885 			  uint32_t nb_mc_addr)
2886 {
2887 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2888 	u8 *mc_addr_list;
2889 
2890 	mc_addr_list = (u8 *)mc_addr_set;
2891 	return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2892 					 ngbe_dev_addr_list_itr, TRUE);
2893 }
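
/*
 * Illustrative sketch (not part of the driver): installing a multicast
 * allow list from an application. port_id is an assumed port:
 *
 *	struct rte_ether_addr mc[1] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *	};
 *
 *	rte_eth_dev_set_mc_addr_list(port_id, mc, 1);
 */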
2894 
2895 static uint64_t
2896 ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
2897 {
2898 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2899 	uint64_t systime_cycles;
2900 
2901 	systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL);
2902 	systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32;
2903 
2904 	return systime_cycles;
2905 }
2906 
2907 static uint64_t
2908 ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2909 {
2910 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2911 	uint64_t rx_tstamp_cycles;
2912 
2913 	/* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
2914 	rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL);
2915 	rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32;
2916 
2917 	return rx_tstamp_cycles;
2918 }
2919 
2920 static uint64_t
2921 ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2922 {
2923 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2924 	uint64_t tx_tstamp_cycles;
2925 
2926 	/* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
2927 	tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL);
2928 	tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32;
2929 
2930 	return tx_tstamp_cycles;
2931 }
2932 
2933 static void
2934 ngbe_start_timecounters(struct rte_eth_dev *dev)
2935 {
2936 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2937 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2938 	uint32_t incval = 0;
2939 	uint32_t shift = 0;
2940 
2941 	incval = NGBE_INCVAL_1GB;
2942 	shift = NGBE_INCVAL_SHIFT_1GB;
2943 
2944 	wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval));
2945 
2946 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
2947 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2948 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2949 
2950 	adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2951 	adapter->systime_tc.cc_shift = shift;
2952 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
2953 
2954 	adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2955 	adapter->rx_tstamp_tc.cc_shift = shift;
2956 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2957 
2958 	adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2959 	adapter->tx_tstamp_tc.cc_shift = shift;
2960 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2961 }
2962 
2963 static int
2964 ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
2965 {
2966 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2967 
2968 	adapter->systime_tc.nsec += delta;
2969 	adapter->rx_tstamp_tc.nsec += delta;
2970 	adapter->tx_tstamp_tc.nsec += delta;
2971 
2972 	return 0;
2973 }
2974 
2975 static int
2976 ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
2977 {
2978 	uint64_t ns;
2979 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2980 
2981 	ns = rte_timespec_to_ns(ts);
2982 	/* Set the timecounters to a new value. */
2983 	adapter->systime_tc.nsec = ns;
2984 	adapter->rx_tstamp_tc.nsec = ns;
2985 	adapter->tx_tstamp_tc.nsec = ns;
2986 
2987 	return 0;
2988 }
2989 
2990 static int
2991 ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
2992 {
2993 	uint64_t ns, systime_cycles;
2994 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2995 
2996 	systime_cycles = ngbe_read_systime_cyclecounter(dev);
2997 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
2998 	*ts = rte_ns_to_timespec(ns);
2999 
3000 	return 0;
3001 }
3002 
3003 static int
3004 ngbe_timesync_enable(struct rte_eth_dev *dev)
3005 {
3006 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3007 	uint32_t tsync_ctl;
3008 
3009 	/* Stop the timesync system time. */
3010 	wr32(hw, NGBE_TSTIMEINC, 0x0);
3011 	/* Reset the timesync system time value. */
3012 	wr32(hw, NGBE_TSTIMEL, 0x0);
3013 	wr32(hw, NGBE_TSTIMEH, 0x0);
3014 
3015 	ngbe_start_timecounters(dev);
3016 
3017 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3018 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588),
3019 		RTE_ETHER_TYPE_1588 | NGBE_ETFLT_ENA | NGBE_ETFLT_1588);
3020 
3021 	/* Enable timestamping of received PTP packets. */
3022 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
3023 	tsync_ctl |= NGBE_TSRXCTL_ENA;
3024 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
3025 
3026 	/* Enable timestamping of transmitted PTP packets. */
3027 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
3028 	tsync_ctl |= NGBE_TSTXCTL_ENA;
3029 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
3030 
3031 	ngbe_flush(hw);
3032 
3033 	return 0;
3034 }
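
/*
 * Illustrative sketch (not part of the driver): a minimal PTP Rx
 * timestamping flow using the handlers above; packet reception is
 * elided and port_id is an assumed port:
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	// ... after a PTP packet was received on queue 0 ...
 *	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *		printf("rx stamped at %ld.%09ld\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 */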
3035 
3036 static int
3037 ngbe_timesync_disable(struct rte_eth_dev *dev)
3038 {
3039 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3040 	uint32_t tsync_ctl;
3041 
3042 	/* Disable timestamping of transmitted PTP packets. */
3043 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
3044 	tsync_ctl &= ~NGBE_TSTXCTL_ENA;
3045 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
3046 
3047 	/* Disable timestamping of received PTP packets. */
3048 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
3049 	tsync_ctl &= ~NGBE_TSRXCTL_ENA;
3050 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
3051 
3052 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3053 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);
3054 
3055 	/* Stop incrementing the System Time registers. */
3056 	wr32(hw, NGBE_TSTIMEINC, 0);
3057 
3058 	return 0;
3059 }
3060 
3061 static int
3062 ngbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3063 				 struct timespec *timestamp,
3064 				 uint32_t flags __rte_unused)
3065 {
3066 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3067 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
3068 	uint32_t tsync_rxctl;
3069 	uint64_t rx_tstamp_cycles;
3070 	uint64_t ns;
3071 
3072 	tsync_rxctl = rd32(hw, NGBE_TSRXCTL);
3073 	if ((tsync_rxctl & NGBE_TSRXCTL_VLD) == 0)
3074 		return -EINVAL;
3075 
3076 	rx_tstamp_cycles = ngbe_read_rx_tstamp_cyclecounter(dev);
3077 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
3078 	*timestamp = rte_ns_to_timespec(ns);
3079 
3080 	return  0;
3081 }
3082 
3083 static int
3084 ngbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3085 				 struct timespec *timestamp)
3086 {
3087 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3088 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
3089 	uint32_t tsync_txctl;
3090 	uint64_t tx_tstamp_cycles;
3091 	uint64_t ns;
3092 
3093 	tsync_txctl = rd32(hw, NGBE_TSTXCTL);
3094 	if ((tsync_txctl & NGBE_TSTXCTL_VLD) == 0)
3095 		return -EINVAL;
3096 
3097 	tx_tstamp_cycles = ngbe_read_tx_tstamp_cyclecounter(dev);
3098 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
3099 	*timestamp = rte_ns_to_timespec(ns);
3100 
3101 	return 0;
3102 }
3103 
3104 static int
3105 ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
3106 {
3107 	int count = 0;
3108 	int g_ind = 0;
3109 	const struct reg_info *reg_group;
3110 	const struct reg_info **reg_set = ngbe_regs_others;
3111 
3112 	while ((reg_group = reg_set[g_ind++]))
3113 		count += ngbe_regs_group_count(reg_group);
3114 
3115 	return count;
3116 }
3117 
3118 static int
3119 ngbe_get_regs(struct rte_eth_dev *dev,
3120 	      struct rte_dev_reg_info *regs)
3121 {
3122 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3123 	uint32_t *data = regs->data;
3124 	int g_ind = 0;
3125 	int count = 0;
3126 	const struct reg_info *reg_group;
3127 	const struct reg_info **reg_set = ngbe_regs_others;
3128 
3129 	if (data == NULL) {
3130 		regs->length = ngbe_get_reg_length(dev);
3131 		regs->width = sizeof(uint32_t);
3132 		return 0;
3133 	}
3134 
3135 	/* Support only full register dump */
3136 	if (regs->length == 0 ||
3137 	    regs->length == (uint32_t)ngbe_get_reg_length(dev)) {
3138 		regs->version = hw->mac.type << 24 |
3139 				hw->revision_id << 16 |
3140 				hw->device_id;
3141 		while ((reg_group = reg_set[g_ind++]))
3142 			count += ngbe_read_regs_group(dev, &data[count],
3143 						      reg_group);
3144 		return 0;
3145 	}
3146 
3147 	return -ENOTSUP;
3148 }
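
/*
 * Illustrative sketch (not part of the driver): the two-call pattern
 * the handler above supports, first querying the length, then asking
 * for a full dump. port_id is an assumed port:
 *
 *	struct rte_dev_reg_info info = { .data = NULL };
 *
 *	if (rte_eth_dev_get_reg_info(port_id, &info) == 0) {
 *		info.data = calloc(info.length, info.width);
 *		info.length = 0;	// 0 requests the full dump
 *		rte_eth_dev_get_reg_info(port_id, &info);
 *	}
 */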
3149 
3150 static int
3151 ngbe_get_eeprom_length(struct rte_eth_dev *dev)
3152 {
3153 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3154 
3155 	/* The returned length is in bytes */
3156 	return hw->rom.word_size * 2;
3157 }
3158 
3159 static int
3160 ngbe_get_eeprom(struct rte_eth_dev *dev,
3161 		struct rte_dev_eeprom_info *in_eeprom)
3162 {
3163 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3164 	struct ngbe_rom_info *eeprom = &hw->rom;
3165 	uint16_t *data = in_eeprom->data;
3166 	int first, length;
3167 
3168 	first = in_eeprom->offset >> 1;
3169 	length = in_eeprom->length >> 1;
3170 	if (first > hw->rom.word_size ||
3171 	    ((first + length) > hw->rom.word_size))
3172 		return -EINVAL;
3173 
3174 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3175 
3176 	return eeprom->readw_buffer(hw, first, length, data);
3177 }
3178 
3179 static int
3180 ngbe_set_eeprom(struct rte_eth_dev *dev,
3181 		struct rte_dev_eeprom_info *in_eeprom)
3182 {
3183 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3184 	struct ngbe_rom_info *eeprom = &hw->rom;
3185 	uint16_t *data = in_eeprom->data;
3186 	int first, length;
3187 
3188 	first = in_eeprom->offset >> 1;
3189 	length = in_eeprom->length >> 1;
3190 	if (first > hw->rom.word_size ||
3191 	    ((first + length) > hw->rom.word_size))
3192 		return -EINVAL;
3193 
3194 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3195 
3196 	return eeprom->writew_buffer(hw,  first, length, data);
3197 }
3198 
3199 static const struct eth_dev_ops ngbe_eth_dev_ops = {
3200 	.dev_configure              = ngbe_dev_configure,
3201 	.dev_infos_get              = ngbe_dev_info_get,
3202 	.dev_start                  = ngbe_dev_start,
3203 	.dev_stop                   = ngbe_dev_stop,
3204 	.dev_set_link_up            = ngbe_dev_set_link_up,
3205 	.dev_set_link_down          = ngbe_dev_set_link_down,
3206 	.dev_close                  = ngbe_dev_close,
3207 	.dev_reset                  = ngbe_dev_reset,
3208 	.promiscuous_enable         = ngbe_dev_promiscuous_enable,
3209 	.promiscuous_disable        = ngbe_dev_promiscuous_disable,
3210 	.allmulticast_enable        = ngbe_dev_allmulticast_enable,
3211 	.allmulticast_disable       = ngbe_dev_allmulticast_disable,
3212 	.link_update                = ngbe_dev_link_update,
3213 	.stats_get                  = ngbe_dev_stats_get,
3214 	.xstats_get                 = ngbe_dev_xstats_get,
3215 	.xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
3216 	.stats_reset                = ngbe_dev_stats_reset,
3217 	.xstats_reset               = ngbe_dev_xstats_reset,
3218 	.xstats_get_names           = ngbe_dev_xstats_get_names,
3219 	.xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
3220 	.fw_version_get             = ngbe_fw_version_get,
3221 	.dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
3222 	.mtu_set                    = ngbe_dev_mtu_set,
3223 	.vlan_filter_set            = ngbe_vlan_filter_set,
3224 	.vlan_tpid_set              = ngbe_vlan_tpid_set,
3225 	.vlan_offload_set           = ngbe_vlan_offload_set,
3226 	.vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
3227 	.rx_queue_start	            = ngbe_dev_rx_queue_start,
3228 	.rx_queue_stop              = ngbe_dev_rx_queue_stop,
3229 	.tx_queue_start	            = ngbe_dev_tx_queue_start,
3230 	.tx_queue_stop              = ngbe_dev_tx_queue_stop,
3231 	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
3232 	.rx_queue_release           = ngbe_dev_rx_queue_release,
3233 	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
3234 	.tx_queue_release           = ngbe_dev_tx_queue_release,
3235 	.rx_queue_intr_enable       = ngbe_dev_rx_queue_intr_enable,
3236 	.rx_queue_intr_disable      = ngbe_dev_rx_queue_intr_disable,
3237 	.dev_led_on                 = ngbe_dev_led_on,
3238 	.dev_led_off                = ngbe_dev_led_off,
3239 	.flow_ctrl_get              = ngbe_flow_ctrl_get,
3240 	.flow_ctrl_set              = ngbe_flow_ctrl_set,
3241 	.mac_addr_add               = ngbe_add_rar,
3242 	.mac_addr_remove            = ngbe_remove_rar,
3243 	.mac_addr_set               = ngbe_set_default_mac_addr,
3244 	.uc_hash_table_set          = ngbe_uc_hash_table_set,
3245 	.uc_all_hash_table_set      = ngbe_uc_all_hash_table_set,
3246 	.reta_update                = ngbe_dev_rss_reta_update,
3247 	.reta_query                 = ngbe_dev_rss_reta_query,
3248 	.rss_hash_update            = ngbe_dev_rss_hash_update,
3249 	.rss_hash_conf_get          = ngbe_dev_rss_hash_conf_get,
3250 	.set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
3251 	.rxq_info_get               = ngbe_rxq_info_get,
3252 	.txq_info_get               = ngbe_txq_info_get,
3253 	.rx_burst_mode_get          = ngbe_rx_burst_mode_get,
3254 	.tx_burst_mode_get          = ngbe_tx_burst_mode_get,
3255 	.timesync_enable            = ngbe_timesync_enable,
3256 	.timesync_disable           = ngbe_timesync_disable,
3257 	.timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp,
3258 	.timesync_read_tx_timestamp = ngbe_timesync_read_tx_timestamp,
3259 	.get_reg                    = ngbe_get_regs,
3260 	.get_eeprom_length          = ngbe_get_eeprom_length,
3261 	.get_eeprom                 = ngbe_get_eeprom,
3262 	.set_eeprom                 = ngbe_set_eeprom,
3263 	.timesync_adjust_time       = ngbe_timesync_adjust_time,
3264 	.timesync_read_time         = ngbe_timesync_read_time,
3265 	.timesync_write_time        = ngbe_timesync_write_time,
3266 	.tx_done_cleanup            = ngbe_dev_tx_done_cleanup,
3267 };
3268 
3269 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
3270 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
3271 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
3272 
3273 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
3274 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
3275 
3276 #ifdef RTE_ETHDEV_DEBUG_RX
3277 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
3278 #endif
3279 #ifdef RTE_ETHDEV_DEBUG_TX
3280 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
3281 #endif
3282