/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"
#include "ngbe_regs_group.h"

static const struct reg_info ngbe_regs_general[] = {
	{NGBE_RST, 1, 1, "NGBE_RST"},
	{NGBE_STAT, 1, 1, "NGBE_STAT"},
	{NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
	{NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
	{NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
	{NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_nvm[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_interrupt[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_fctl_others[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rxdma[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_tx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_wakeup[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_mac[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_diagnostic[] = {
	{0, 0, 0, ""},
};

/* PF registers */
static const struct reg_info *ngbe_regs_others[] = {
				ngbe_regs_general,
				ngbe_regs_nvm,
				ngbe_regs_interrupt,
				ngbe_regs_fctl_others,
				ngbe_regs_rxdma,
				ngbe_regs_rx,
				ngbe_regs_tx,
				ngbe_regs_wakeup,
				ngbe_regs_mac,
				ngbe_regs_diagnostic,
				NULL};

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
					uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);
static void ngbe_pbthresh_set(struct rte_eth_dev *dev);

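/*
 * Helpers for the per-queue HW VLAN strip bitmap: a queue number is split
 * into a word index (q / bits-per-word) and a bit position (q % bits-per-word)
 * within hwstrip->bitmap[].
 */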
#define NGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_TXD_ALIGN,
	.nb_seg_max = NGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
	/* MNG RxTx */
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),
	/* Basic RxTx */
	HW_XSTAT(rx_packets),
	HW_XSTAT(tx_packets),
	HW_XSTAT(rx_bytes),
	HW_XSTAT(tx_bytes),
	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(tx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(tx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),
	HW_XSTAT(rx_dma_drop),
	HW_XSTAT(tx_dma_drop),
	HW_XSTAT(tx_secdrp_packets),

	/* Basic Error */
	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_cnt),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	/* PB Stats */
	HW_XSTAT(rx_up_dropped),
	HW_XSTAT(rdb_pkt_cnt),
	HW_XSTAT(rdb_repli_cnt),
	HW_XSTAT(rdb_drp_cnt),

	/* MACSEC */
	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	/* MAC RxTx */
	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	/* Flow Control */
	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
			   sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
			   sizeof(rte_ngbe_qp_strings[0]))

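/*
 * Reset the PF MAC and set the PORTCTL "reset done" bit so that PF/VF
 * mailbox operations can proceed; a missing SFP is not treated as an error.
 */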
static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	if (status == NGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}

static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	wr32(hw, NGBE_IENMISC, intr->mask_misc);
	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
	ngbe_flush(hw);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
	ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These locks are trickier since they are common to all ports; but
	 * the swfw_sync retries last long enough (1s) that, if the lock
	 * cannot be taken, it is almost certainly due to an improperly
	 * held semaphore.
	 */
	mask = NGBE_MNGSEM_SWPHY |
	       NGBE_MNGSEM_SWMBX |
	       NGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	u32 led_conf = 0;
	int err, ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ngbe_eth_dev_ops;
	eth_dev->rx_queue_count       = ngbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * Rx and Tx function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ngbe_tx_queue *txq;
		/* The Tx function is set in the primary process by the last
		 * queue initialized; Tx queues may not yet have been
		 * initialized by the primary process.
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			ngbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default Tx function if we get here */
			PMD_INIT_LOG(NOTICE,
				"No Tx queues configured yet. Using default Tx function.");
		}

		ngbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->back = pci_dev;
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	if (pci_dev->id.subsystem_vendor_id == PCI_VENDOR_ID_WANGXUN) {
		hw->sub_system_id = pci_dev->id.subsystem_device_id;
	} else {
		u32 ssid;

		ssid = ngbe_flash_read_dword(hw, 0xFFFDC);
		if (ssid == 0x1) {
			PMD_INIT_LOG(ERR,
				"Read of internal subsystem device id failed");
			return -ENODEV;
		}
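		/* swap the two bytes of the 16-bit subsystem ID read from flash */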
		hw->sub_system_id = (u16)ssid >> 8 | (u16)ssid << 8;
	}
	ngbe_map_device_id(hw);

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = ngbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ngbe_fc_full;
	hw->fc.current_mode = ngbe_fc_full;
	hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
	hw->fc.low_water = NGBE_FC_XON_LOTH;
	hw->fc.high_water = NGBE_FC_XOFF_HITH;
	hw->fc.send_xon = 1;

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, NULL);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->phy.led_oem_chk(hw, &led_conf);
	if (err == 0)
		hw->led_conf = led_conf;
	else
		hw->led_conf = 0xFFFF;

	err = hw->mac.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	ngbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ngbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ret = ngbe_pf_host_init(eth_dev);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		return ret;
	}

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			(int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ngbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ngbe_enable_intr(eth_dev);

	return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_dev_close(eth_dev);

	return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct ngbe_adapter),
			eth_dev_pci_specific_init, pci_dev,
			eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (ethdev == NULL)
		return 0;

	return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
	.id_table = pci_id_ngbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ngbe_pci_probe,
	.remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

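	/* Each 32-bit VLANTBL entry covers 32 VLAN IDs: bits 5..11 of the
	 * VLAN ID select the table entry, bits 0..4 the bit within it.
	 */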
	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
	vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

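/*
 * Toggling VLAN strip on a ring that is already enabled only takes effect
 * after the queue is stopped and restarted, which is what the restart path
 * below does.
 */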
static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	if (on)
		ngbe_vlan_hw_strip_enable(dev, queue);
	else
		ngbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			!(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg |= NGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg &= ~NGBE_RXCFG_VLAN;
	}
	rxcfg &= ~NGBE_RXCFG_ENA;

	if (restart) {
		/* set vlan strip for ring */
		ngbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
		ngbe_dev_rx_queue_start(dev, queue);
	}
}

static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, NGBE_PORTCTL);

	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR,
				"Inner type is not supported by single VLAN");
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_LSB_MASK,
				NGBE_TAGTPID_LSB(tpid));
		}
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16 bits are valid */
			wr32m(hw, NGBE_EXTAG,
				NGBE_EXTAG_VLAN_MASK,
				NGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_MSB_MASK,
				NGBE_TAGTPID_MSB(tpid));
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}

void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);
}

void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_CFIENA;
	vlnctrl |= NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < NGBE_VFTA_SIZE; i++)
		wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
	struct ngbe_rx_queue *rxq;

	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		NGBE_SET_HWSTRIP(hwstrip, queue);
	else
		NGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl &= ~NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record the per-queue HW strip setting */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl |= NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record the per-queue HW strip setting */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_VLANEXT;
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl  = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl  = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct ngbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			ngbe_vlan_hw_strip_enable(dev, i);
		else
			ngbe_vlan_hw_strip_disable(dev, i);
	}
}

void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct ngbe_rx_queue *rxq;

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & RTE_ETH_VLAN_STRIP_MASK)
		ngbe_vlan_hw_strip_config(dev);

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			ngbe_vlan_hw_filter_enable(dev);
		else
			ngbe_vlan_hw_filter_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
			ngbe_vlan_hw_extend_enable(dev);
		else
			ngbe_vlan_hw_extend_disable(dev);
	}

	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
			ngbe_qinq_hw_strip_enable(dev);
		else
			ngbe_qinq_hw_strip_disable(dev);
	}

	return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	ngbe_config_vlan_strip_on_all_queues(dev, mask);

	ngbe_vlan_offload_config(dev, mask);

	return 0;
}

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	/* set flag to update link status after init */
	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
	 * allocation Rx preconditions, this flag will be reset.
	 */
	adapter->rx_bulk_alloc_allowed = true;
	adapter->rx_vec_allowed = true;

	return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
	else
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

	intr->mask_misc |= NGBE_ICRMISC_GPIO | NGBE_ICRMISC_HEAT;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = false;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;

	/* reinitialize adapter, this calls reset and start */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = ngbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	ngbe_set_pcie_master(hw, true);

	/* configure PF module if SRIOV enabled */
	ngbe_pf_host_configure(dev);

	ngbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
						   dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate %d rx_queues intr_vec",
				     dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until Rx interrupt */
	ngbe_configure_msix(dev);

	/* initialize transmission unit */
	ngbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ngbe_dev_rx_init(dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
		goto error;
	}

	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
		RTE_ETH_VLAN_EXTEND_MASK;
	err = ngbe_vlan_offload_config(dev, mask);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	hw->mac.setup_pba(hw);
	ngbe_pbthresh_set(dev);
	ngbe_configure_port(dev);

	err = ngbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	hw->lsc = dev->data->dev_conf.intr_conf.lsc;

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err != 0)
		goto error;
	dev->data->dev_link.link_status = link_up;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		negotiate = true;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err != 0)
		goto error;

	allowed_speeds = 0;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

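	/* Bit 0 is the fixed-speed flag; shift it out and check that only
	 * speeds advertised by the MAC are requested.
	 */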
	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = hw->mac.default_speeds;
	} else {
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= NGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= NGBE_LINK_SPEED_100M_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
			speed |= NGBE_LINK_SPEED_10M_FULL;
	}

	err = hw->phy.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "PHY init failed");
		goto error;
	}
	err = hw->mac.setup_link(hw, speed, link_up);
	if (err != 0)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		ngbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
		ngbe_dev_macsec_interrupt_setup(dev);
		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     ngbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO,
				     "LSC won't be enabled because interrupt multiplexing is not available");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		ngbe_dev_rxq_interrupt_setup(dev);

	/* enable UIO/VFIO intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since HW reset */
	ngbe_enable_intr(dev);

	if (hw->gpio_ctl) {
		/* GPIO 0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, 0);
	}

	/*
	 * Update the link status right before returning, because the link
	 * configuration process may be started in a separate thread.
	 */
	ngbe_dev_link_update(dev, 0);

	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	ngbe_dev_clear_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int vf;

	if (hw->adapter_stopped)
		goto out;

	PMD_INIT_FUNC_TRACE();

	if (hw->gpio_ctl) {
		/* GPIO 0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
	}

	/* disable interrupts */
	ngbe_disable_intr(hw);

	/* reset the NIC */
	ngbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	ngbe_stop_hw(hw);

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	ngbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ngbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	ngbe_set_pcie_master(hw, true);

	adapter->rss_reta_updated = 0;

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

out:
	/* close phy to prevent reset in dev_close from restarting physical link */
	hw->phy.set_phy_power(hw, false);

	return 0;
}

/*
 * Set device link up: power on.
 */
static int
ngbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	hw->phy.set_phy_power(hw, true);

	return 0;
}

/*
 * Set device link down: power off.
 */
static int
ngbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	hw->phy.set_phy_power(hw, false);

	return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_pf_reset_hw(hw);

	ngbe_dev_stop(dev);

	ngbe_dev_free_queues(dev);

	ngbe_set_pcie_master(hw, false);

	/* reprogram the RAR[0] in case user changed it. */
	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
				ngbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + NGBE_LINK_UP_TIME));

	/* uninitialize PF if max_vfs not zero */
	ngbe_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begins to reset a PF port, it should notify all
	 * its VFs so they stay aligned with it. The detailed notification
	 * mechanism is PMD specific. For the ngbe PF it is rather complex,
	 * so to avoid unexpected behavior in the VFs, resetting a PF with
	 * SR-IOV activated is currently not supported. It might be
	 * supported later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_ngbe_dev_uninit(dev);
	if (ret != 0)
		return ret;

	ret = eth_ngbe_dev_init(dev, NULL);

	return ret;
}

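/*
 * The per-queue hardware counters are free-running and can wrap; these
 * helpers compute the delta since the last read (adding the counter width
 * on wraparound) and re-baseline the last value when offsets have not been
 * loaded, i.e. right after a stats reset.
 */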
#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
	{                                                       \
		uint32_t current_counter = rd32(hw, reg);       \
		if (current_counter < last_counter)             \
			current_counter += 0x100000000LL;       \
		if (!hw->offset_loaded)                         \
			last_counter = current_counter;         \
		counter = current_counter - last_counter;       \
		counter &= 0xFFFFFFFFLL;                        \
	}

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{                                                                \
		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
		uint64_t current_counter = (current_counter_msb << 32) | \
			current_counter_lsb;                             \
		if (current_counter < last_counter)                      \
			current_counter += 0x1000000000LL;               \
		if (!hw->offset_loaded)                                  \
			last_counter = current_counter;                  \
		counter = current_counter - last_counter;                \
		counter &= 0xFFFFFFFFFLL;                                \
	}

void
ngbe_read_stats_registers(struct ngbe_hw *hw,
			   struct ngbe_hw_stats *hw_stats)
{
	unsigned int i;

	/* QP Stats */
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
			hw->qp_last[i].rx_qp_packets,
			hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
			hw->qp_last[i].rx_qp_bytes,
			hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
			hw->qp_last[i].rx_qp_mc_packets,
			hw_stats->qp[i].rx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
			hw->qp_last[i].rx_qp_bc_packets,
			hw_stats->qp[i].rx_qp_bc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
			hw->qp_last[i].tx_qp_packets,
			hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
			hw->qp_last[i].tx_qp_bytes,
			hw_stats->qp[i].tx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
			hw->qp_last[i].tx_qp_mc_packets,
			hw_stats->qp[i].tx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
			hw->qp_last[i].tx_qp_bc_packets,
			hw_stats->qp[i].tx_qp_bc_packets);
	}

	/* PB Stats */
	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

	/* DMA Stats */
	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_dma_drop += rd32(hw, NGBE_DMATXDROP);
	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

	/* MAC Stats */
	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
	hw_stats->rx_oversize_cnt += rd32(hw, NGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

	/* MNG Stats */
	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);

	/* MACsec Stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, NGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, NGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, NGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, NGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, NGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, NGBE_LSECRX_BADPKT(i));
	}
	for (i = 0; i < 4; i++) {
		hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
		hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
	}
	hw_stats->rx_total_missed_packets =
			hw_stats->rx_up_dropped;
}

static int
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct ngbe_stat_mappings *stat_mappings =
			NGBE_DEV_STAT_MAPPINGS(dev);
	uint32_t i, j;

	ngbe_read_stats_registers(hw, hw_stats);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
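	/* Each RQSM/TQSM register packs NB_QMAP_FIELDS_PER_QSM_REG 8-bit
	 * fields, each mapping a queue to its statistics counter index.
	 */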
	for (i = 0; i < NGBE_MAX_QP; i++) {
		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
		uint32_t q_map;

		q_map = (stat_mappings->rqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

		q_map = (stat_mappings->tqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
	}

	/* Rx Errors */
	stats->imissed  = hw_stats->rx_total_missed_packets +
			  hw_stats->rx_dma_drop;
	stats->ierrors  = hw_stats->rx_crc_errors +
			  hw_stats->rx_mac_short_packet_dropped +
			  hw_stats->rx_length_errors +
			  hw_stats->rx_undersize_errors +
			  hw_stats->rdb_drp_cnt +
			  hw_stats->rx_illegal_byte_errors +
			  hw_stats->rx_error_bytes +
			  hw_stats->rx_fragment_errors;

	/* Tx Errors */
	stats->oerrors  = 0;
	return 0;
}

static int
ngbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_dev_stats_get(dev, NULL);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

/* This function calculates the number of xstats based on the current config */
static unsigned
ngbe_xstats_calc_num(struct rte_eth_dev *dev)
{
	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	return NGBE_NB_HW_STATS +
	       NGBE_NB_QP_STATS * nb_queues;
}

static inline int
ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		snprintf(name, size, "[hw]%s",
			rte_ngbe_stats_strings[id].name);
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		snprintf(name, size, "[q%u]%s", nb,
			rte_ngbe_qp_strings[st].name);
		return 0;
	}
	id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;

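	/* id is out of range; encode how far past the end it is in the
	 * negative return value.
	 */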
	return -(int)(id + 1);
}

static inline int
ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		*offset = rte_ngbe_stats_strings[id].offset;
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
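		/* qp[] is laid out as per-queue blocks of NGBE_NB_QP_STATS
		 * 64-bit counters, so step nb blocks from the base offset.
		 */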
		*offset = rte_ngbe_qp_strings[st].offset +
			nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
		return 0;
	}

	return -1;
}

static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
{
	unsigned int i, count;

	count = ngbe_xstats_calc_num(dev);
	if (xstats_names == NULL)
		return count;

	/* Note: limit >= cnt_stats checked upstream
	 * in rte_eth_xstats_names()
	 */
	limit = min(limit, count);

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(i, xstats_names[i].name,
			sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
	}

	return i;
}

static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
	const uint64_t *ids,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int limit)
{
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
				sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			return -1;
		}
	}

	return i;
}

static int
ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
					 unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (xstats == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset = 0;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
		xstats[i].id = i;
	}

	return i;
}

static int
ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
					 unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, values is NULL, and we have cleared the
	 * registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (values == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}

static int
ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int limit)
{
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_(dev, values, limit);

	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(ids[i], &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}

static int
ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

static int
ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret;

	ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);

	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;

	return 0;
}

static int
ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = 0;
	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
				RTE_ETH_LINK_SPEED_10M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
}

const uint32_t *
ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)
	    dev->rx_pkt_burst == ngbe_recv_pkts_vec ||
	    dev->rx_pkt_burst == ngbe_recv_scattered_pkts_vec ||
#endif
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
		return ngbe_get_supported_ptypes(no_of_elements);

	return NULL;
}

static void
ngbe_dev_overheat(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	s32 temp_state;

	temp_state = hw->mac.check_overtemp(hw);
	if (!temp_state)
		return;

	if (temp_state == NGBE_ERR_UNDERTEMP) {
1894 		PMD_DRV_LOG(CRIT, "Network adapter has been started again, "
1895 			"since the temperature has returned to normal.");
1896 		wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, NGBE_PBRXCTL_ENA);
1897 		ngbe_dev_set_link_up(dev);
1898 	} else if (temp_state == NGBE_ERR_OVERTEMP) {
1899 		PMD_DRV_LOG(CRIT, "Network adapter has been stopped because it has overheated.");
1900 		wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0);
1901 		ngbe_dev_set_link_down(dev);
1902 	}
1903 }
1904 
1905 /* Return 0 if the link status changed, -1 if it did not change */
1906 int
1907 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1908 			    int wait_to_complete)
1909 {
1910 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1911 	struct rte_eth_link link;
1912 	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1913 	u32 lan_speed = 0;
1914 	bool link_up;
1915 	int err;
1916 	int wait = 1;
1917 
1918 	memset(&link, 0, sizeof(link));
1919 	link.link_status = RTE_ETH_LINK_DOWN;
1920 	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1921 	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1922 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1923 			~RTE_ETH_LINK_SPEED_AUTONEG);
1924 
1925 	hw->mac.get_link_status = true;
1926 
1927 	/* do not wait for completion if no wait was requested or the LSC interrupt is enabled */
1928 	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1929 		wait = 0;
1930 
1931 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1932 	if (err != 0) {
1933 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1934 		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1935 		return rte_eth_linkstatus_set(dev, &link);
1936 	}
1937 
1938 	if (!link_up)
1939 		return rte_eth_linkstatus_set(dev, &link);
1940 
1941 	link.link_status = RTE_ETH_LINK_UP;
1942 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1943 
1944 	switch (link_speed) {
1945 	default:
1946 	case NGBE_LINK_SPEED_UNKNOWN:
1947 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1948 		break;
1949 
1950 	case NGBE_LINK_SPEED_10M_FULL:
1951 		link.link_speed = RTE_ETH_SPEED_NUM_10M;
1952 		lan_speed = 0;
1953 		break;
1954 
1955 	case NGBE_LINK_SPEED_100M_FULL:
1956 		link.link_speed = RTE_ETH_SPEED_NUM_100M;
1957 		lan_speed = 1;
1958 		break;
1959 
1960 	case NGBE_LINK_SPEED_1GB_FULL:
1961 		link.link_speed = RTE_ETH_SPEED_NUM_1G;
1962 		lan_speed = 2;
1963 		break;
1964 	}
1965 
1966 	if (hw->is_pf) {
1967 		wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1968 		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1969 				NGBE_LINK_SPEED_100M_FULL |
1970 				NGBE_LINK_SPEED_10M_FULL)) {
1971 			wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1972 				NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1973 		}
1974 		wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_PROMISC,
1975 			NGBE_MACRXFLT_PROMISC);
1976 	}
1977 
1978 	return rte_eth_linkstatus_set(dev, &link);
1979 }
1980 
1981 static int
1982 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1983 {
1984 	return ngbe_dev_link_update_share(dev, wait_to_complete);
1985 }
1986 
1987 static int
1988 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1989 {
1990 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1991 	uint32_t fctrl;
1992 
1993 	fctrl = rd32(hw, NGBE_PSRCTL);
1994 	fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
1995 	wr32(hw, NGBE_PSRCTL, fctrl);
1996 
1997 	return 0;
1998 }
1999 
2000 static int
2001 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2002 {
2003 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2004 	uint32_t fctrl;
2005 
2006 	fctrl = rd32(hw, NGBE_PSRCTL);
2007 	fctrl &= (~NGBE_PSRCTL_UCP);
2008 	if (dev->data->all_multicast == 1)
2009 		fctrl |= NGBE_PSRCTL_MCP;
2010 	else
2011 		fctrl &= (~NGBE_PSRCTL_MCP);
2012 	wr32(hw, NGBE_PSRCTL, fctrl);
2013 
2014 	return 0;
2015 }
2016 
2017 static int
2018 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2019 {
2020 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2021 	uint32_t fctrl;
2022 
2023 	fctrl = rd32(hw, NGBE_PSRCTL);
2024 	fctrl |= NGBE_PSRCTL_MCP;
2025 	wr32(hw, NGBE_PSRCTL, fctrl);
2026 
2027 	return 0;
2028 }
2029 
2030 static int
2031 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2032 {
2033 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2034 	uint32_t fctrl;
2035 
2036 	if (dev->data->promiscuous == 1)
2037 		return 0; /* must remain in all_multicast mode */
2038 
2039 	fctrl = rd32(hw, NGBE_PSRCTL);
2040 	fctrl &= (~NGBE_PSRCTL_MCP);
2041 	wr32(hw, NGBE_PSRCTL, fctrl);
2042 
2043 	return 0;
2044 }
2045 
2046 /**
2047  * It clears the interrupt causes and enables the interrupt.
2048  * It will only be called once during NIC initialization.
2049  *
2050  * @param dev
2051  *  Pointer to struct rte_eth_dev.
2052  * @param on
2053  *  Enable or Disable.
2054  *
2055  * @return
2056  *  - On success, zero.
2057  *  - On failure, a negative value.
2058  */
2059 static int
2060 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2061 {
2062 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2063 
2064 	ngbe_dev_link_status_print(dev);
2065 	if (on != 0) {
2066 		intr->mask_misc |= NGBE_ICRMISC_PHY;
2067 		intr->mask_misc |= NGBE_ICRMISC_GPIO;
2068 	} else {
2069 		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2070 		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
2071 	}
2072 
2073 	return 0;
2074 }
2075 
2076 /**
2077  * It clears the interrupt causes and enables the interrupt.
2078  * It will only be called once during NIC initialization.
2079  *
2080  * @param dev
2081  *  Pointer to struct rte_eth_dev.
2082  *
2083  * @return
2084  *  - On success, zero.
2085  *  - On failure, a negative value.
2086  */
2087 static int
2088 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
2089 {
2090 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2091 	u64 mask;
2092 
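	/* keep only the interrupt-cause bit that belongs to the misc vector */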
2093 	mask = NGBE_ICR_MASK;
2094 	mask &= (1ULL << NGBE_MISC_VEC_ID);
2095 	intr->mask |= mask;
2096 	intr->mask_misc |= NGBE_ICRMISC_GPIO;
2097 
2098 	return 0;
2099 }
2100 
2101 /**
2102  * It clears the interrupt causes and enables the interrupt.
2103  * It will only be called once during NIC initialization.
2104  *
2105  * @param dev
2106  *  Pointer to struct rte_eth_dev.
2107  *
2108  * @return
2109  *  - On success, zero.
2110  *  - On failure, a negative value.
2111  */
2112 static int
2113 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2114 {
2115 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2116 	u64 mask;
2117 
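	/* mask off the low vector bits reserved for misc causes; keep the Rx queue bits */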
2118 	mask = NGBE_ICR_MASK;
2119 	mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
2120 	intr->mask |= mask;
2121 
2122 	return 0;
2123 }
2124 
2125 /**
2126  * It clears the interrupt causes and enables the interrupt.
2127  * It will only be called once during NIC initialization.
2128  *
2129  * @param dev
2130  *  Pointer to struct rte_eth_dev.
2131  *
2132  * @return
2133  *  - On success, zero.
2134  *  - On failure, a negative value.
2135  */
2136 static int
2137 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2138 {
2139 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2140 
2141 	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
2142 
2143 	return 0;
2144 }
2145 
2146 /*
2147  * It reads the ICR and sets the flags for the link update.
2148  *
2149  * @param dev
2150  *  Pointer to struct rte_eth_dev.
2151  *
2152  * @return
2153  *  - On success, zero.
2154  *  - On failure, a negative value.
2155  */
2156 static int
2157 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2158 {
2159 	uint32_t eicr;
2160 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2161 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2162 
2163 	/* read the interrupt status block here; it is cleared by the write below */
2164 	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2165 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2166 
2167 	intr->flags = 0;
2168 
2169 	/* set flag for async link update */
2170 	if (eicr & NGBE_ICRMISC_PHY)
2171 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2172 
2173 	if (eicr & NGBE_ICRMISC_VFMBX)
2174 		intr->flags |= NGBE_FLAG_MAILBOX;
2175 
2176 	if (eicr & NGBE_ICRMISC_LNKSEC)
2177 		intr->flags |= NGBE_FLAG_MACSEC;
2178 
2179 	if (eicr & NGBE_ICRMISC_GPIO)
2180 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2181 
2182 	if (eicr & NGBE_ICRMISC_HEAT)
2183 		intr->flags |= NGBE_FLAG_OVERHEAT;
2184 
2185 	((u32 *)hw->isb_mem)[NGBE_ISB_MISC] = 0;
2186 
2187 	return 0;
2188 }
2189 
2190 /**
2191  * Get and then print the link status.
2192  *
2193  * @param dev
2194  *  Pointer to struct rte_eth_dev.
2199  */
2200 static void
2201 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
2202 {
2203 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2204 	struct rte_eth_link link;
2205 
2206 	rte_eth_linkstatus_get(dev, &link);
2207 
2208 	if (link.link_status == RTE_ETH_LINK_UP) {
2209 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2210 					(int)(dev->data->port_id),
2211 					(unsigned int)link.link_speed,
2212 			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2213 					"full-duplex" : "half-duplex");
2214 	} else {
2215 		PMD_INIT_LOG(INFO, "Port %d: Link Down",
2216 				(int)(dev->data->port_id));
2217 	}
2218 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2219 				pci_dev->addr.domain,
2220 				pci_dev->addr.bus,
2221 				pci_dev->addr.devid,
2222 				pci_dev->addr.function);
2223 }
2224 
2225 /*
2226  * It executes link_update after an interrupt has occurred.
2227  *
2228  * @param dev
2229  *  Pointer to struct rte_eth_dev.
2230  *
2231  * @return
2232  *  - On success, zero.
2233  *  - On failure, a negative value.
2234  */
2235 static int
2236 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2237 {
2238 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2239 
2240 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2241 
2242 	if (intr->flags & NGBE_FLAG_MAILBOX) {
2243 		ngbe_pf_mbx_process(dev);
2244 		intr->flags &= ~NGBE_FLAG_MAILBOX;
2245 	}
2246 
2247 	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2248 		struct rte_eth_link link;
2249 
2250 		/* get the link status before the update, to detect a speed change later */
2251 		rte_eth_linkstatus_get(dev, &link);
2252 
2253 		ngbe_dev_link_update(dev, 0);
2254 		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2255 		ngbe_dev_link_status_print(dev);
2256 		if (dev->data->dev_link.link_speed != link.link_speed)
2257 			rte_eth_dev_callback_process(dev,
2258 				RTE_ETH_EVENT_INTR_LSC, NULL);
2259 	}
2260 
2261 	if (intr->flags & NGBE_FLAG_OVERHEAT) {
2262 		ngbe_dev_overheat(dev);
2263 		intr->flags &= ~NGBE_FLAG_OVERHEAT;
2264 	}
2265 
2266 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
2267 	ngbe_enable_intr(dev);
2268 
2269 	return 0;
2270 }
2271 
2272 /**
2273  * Interrupt handler triggered by the NIC for handling a
2274  * specific interrupt.
2275  *
2276  * @param param
2277  *  The address of parameter (struct rte_eth_dev *) registered before.
2278  */
2279 static void
2280 ngbe_dev_interrupt_handler(void *param)
2281 {
2282 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2283 
2284 	ngbe_dev_interrupt_get_status(dev);
2285 	ngbe_dev_interrupt_action(dev);
2286 }
2287 
2288 static int
2289 ngbe_dev_led_on(struct rte_eth_dev *dev)
2290 {
2291 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2292 	return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP;
2293 }
2294 
2295 static int
2296 ngbe_dev_led_off(struct rte_eth_dev *dev)
2297 {
2298 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2299 	return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP;
2300 }
2301 
2302 static int
2303 ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2304 {
2305 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2306 	uint32_t mflcn_reg;
2307 	uint32_t fccfg_reg;
2308 	int rx_pause;
2309 	int tx_pause;
2310 
2311 	fc_conf->pause_time = hw->fc.pause_time;
2312 	fc_conf->high_water = hw->fc.high_water;
2313 	fc_conf->low_water = hw->fc.low_water;
2314 	fc_conf->send_xon = hw->fc.send_xon;
2315 	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2316 
2317 	/*
2318 	 * Return rx_pause status according to actual setting of
2319 	 * RXFCCFG register.
2320 	 */
2321 	mflcn_reg = rd32(hw, NGBE_RXFCCFG);
2322 	if (mflcn_reg & NGBE_RXFCCFG_FC)
2323 		rx_pause = 1;
2324 	else
2325 		rx_pause = 0;
2326 
2327 	/*
2328 	 * Return tx_pause status according to actual setting of
2329 	 * TXFCCFG register.
2330 	 */
2331 	fccfg_reg = rd32(hw, NGBE_TXFCCFG);
2332 	if (fccfg_reg & NGBE_TXFCCFG_FC)
2333 		tx_pause = 1;
2334 	else
2335 		tx_pause = 0;
2336 
2337 	if (rx_pause && tx_pause)
2338 		fc_conf->mode = RTE_ETH_FC_FULL;
2339 	else if (rx_pause)
2340 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2341 	else if (tx_pause)
2342 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2343 	else
2344 		fc_conf->mode = RTE_ETH_FC_NONE;
2345 
2346 	return 0;
2347 }
2348 
2349 static int
2350 ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2351 {
2352 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2353 	int err;
2354 	uint32_t rx_buf_size;
2355 	uint32_t max_high_water;
2356 	enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = {
2357 		ngbe_fc_none,
2358 		ngbe_fc_rx_pause,
2359 		ngbe_fc_tx_pause,
2360 		ngbe_fc_full
2361 	};
2362 
2363 	PMD_INIT_FUNC_TRACE();
2364 
2365 	rx_buf_size = rd32(hw, NGBE_PBRXSIZE);
2366 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2367 
2368 	/*
2369 	 * Reserve at least one Ethernet frame for the watermark;
2370 	 * high_water/low_water are in kilobytes for ngbe.
2371 	 */
2372 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
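	/* e.g. a 32 KB Rx buffer allows at most (32768 - 1518) >> 10 = 30 KB */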
2373 	if (fc_conf->high_water > max_high_water ||
2374 	    fc_conf->high_water < fc_conf->low_water) {
2375 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2376 		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
2377 		return -EINVAL;
2378 	}
2379 
2380 	hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode];
2381 	hw->fc.pause_time     = fc_conf->pause_time;
2382 	hw->fc.high_water     = fc_conf->high_water;
2383 	hw->fc.low_water      = fc_conf->low_water;
2384 	hw->fc.send_xon       = fc_conf->send_xon;
2385 	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
2386 
2387 	err = hw->mac.fc_enable(hw);
2388 
2389 	/* Not negotiated is not an error case */
2390 	if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) {
2391 		wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK,
2392 		      (fc_conf->mac_ctrl_frame_fwd
2393 		       ? NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP));
2394 		ngbe_flush(hw);
2395 
2396 		return 0;
2397 	}
2398 
2399 	PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err);
2400 	return -EIO;
2401 }
2402 
2403 /* Additional bit time to account for NGBE framing */
2404 #define NGBE_ETH_FRAMING 20
2405 
2406 /*
2407  * ngbe_fc_hpbthresh_set - calculate the high water mark for flow control
2408  *
2409  * @dev: pointer to the rte_eth_dev structure
2411  */
2412 static s32
2413 ngbe_fc_hpbthresh_set(struct rte_eth_dev *dev)
2414 {
2415 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2416 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2417 	u32 max_frame_size, tc, dv_id, rx_pb;
2418 	s32 kb, marker;
2419 
2420 	/* Calculate max LAN frame size */
2421 	max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK);
2422 	tc = max_frame_size + NGBE_ETH_FRAMING;
2423 
2424 	/* Calculate delay value for device */
2425 	dv_id = NGBE_DV(tc, tc);
2426 
2427 	/* Loopback switch introduces additional latency */
2428 	if (pci_dev->max_vfs)
2429 		dv_id += NGBE_B2BT(tc);
2430 
2431 	/* Delay value is calculated in bit times; convert it to KB */
2432 	kb = NGBE_BT2KB(dv_id);
2433 	rx_pb = rd32(hw, NGBE_PBRXSIZE) >> 10;
2434 
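	/* high watermark: packet buffer size (KB) minus the required headroom */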
2435 	marker = rx_pb - kb;
2436 
2437 	/* It is possible that the packet buffer is not large enough
2438 	 * to provide the required headroom. In this case log a warning
2439 	 * and do the best we can.
2440 	 */
2441 	if (marker < 0) {
2442 		PMD_DRV_LOG(WARNING, "Packet Buffer can not provide enough headroom to support flow control.");
2443 		marker = tc + 1;
2444 	}
2445 
2446 	return marker;
2447 }
2448 
2449 /*
2450  * ngbe_fc_lpbthresh_set - calculate the low water mark for flow control
2451  *
2452  * @dev: pointer to the rte_eth_dev structure
2453  */
2454 static s32
2455 ngbe_fc_lpbthresh_set(struct rte_eth_dev *dev)
2456 {
2457 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2458 	u32 max_frame_size, tc, dv_id;
2459 	s32 kb;
2460 
2461 	/* Calculate max LAN frame size */
2462 	max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK);
2463 	tc = max_frame_size + NGBE_ETH_FRAMING;
2464 
2465 	/* Calculate delay value for device */
2466 	dv_id = NGBE_LOW_DV(tc);
2467 
2468 	/* Delay value is calculated in bit times; convert it to KB */
2469 	kb = NGBE_BT2KB(dv_id);
2470 
2471 	return kb;
2472 }
2473 
2474 /*
2475  * ngbe_pbthresh_set - calculate and set up the high and low water marks
2476  */
2477 static void
2478 ngbe_pbthresh_set(struct rte_eth_dev *dev)
2479 {
2480 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2481 
2482 	hw->fc.high_water = ngbe_fc_hpbthresh_set(dev);
2483 	hw->fc.low_water = ngbe_fc_lpbthresh_set(dev);
2484 
2485 	/* Low water marks must not be larger than high water marks */
2486 	if (hw->fc.low_water > hw->fc.high_water)
2487 		hw->fc.low_water = 0;
2488 }
2489 
2490 int
2491 ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2492 			  struct rte_eth_rss_reta_entry64 *reta_conf,
2493 			  uint16_t reta_size)
2494 {
2495 	uint8_t i, j, mask;
2496 	uint32_t reta;
2497 	uint16_t idx, shift;
2498 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2499 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2500 
2501 	PMD_INIT_FUNC_TRACE();
2502 
2503 	if (!hw->is_pf) {
2504 		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
2505 			"NIC.");
2506 		return -ENOTSUP;
2507 	}
2508 
2509 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2510 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2511 			"(%d) doesn't match the number the hardware can support "
2512 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2513 		return -EINVAL;
2514 	}
2515 
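	/* reta_conf is an array of 64-entry groups: table entry i lives in
	 * reta_conf[i / 64] at offset i % 64 (e.g. entry 68 is
	 * reta_conf[1].reta[4]); four 8-bit entries are packed per 32-bit
	 * RSSTBL register.
	 */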
2516 	for (i = 0; i < reta_size; i += 4) {
2517 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2518 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2519 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2520 		if (!mask)
2521 			continue;
2522 
2523 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2524 		for (j = 0; j < 4; j++) {
2525 			if (RS8(mask, j, 0x1)) {
2526 				reta  &= ~(MS32(8 * j, 0xFF));
2527 				reta |= LS32(reta_conf[idx].reta[shift + j],
2528 						8 * j, 0xFF);
2529 			}
2530 		}
2531 		wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2532 	}
2533 	adapter->rss_reta_updated = 1;
2534 
2535 	return 0;
2536 }
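/*
 * A minimal usage sketch (hypothetical application code; port_id is
 * illustrative): spread the 128-entry redirection table across two Rx
 * queues via the generic ethdev API, which lands in
 * ngbe_dev_rss_reta_update() above.
 *
 *	struct rte_eth_rss_reta_entry64 conf[2];
 *	int i;
 *
 *	memset(conf, 0, sizeof(conf));
 *	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
 *		conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
 *		conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
 *			i % 2;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, conf, RTE_ETH_RSS_RETA_SIZE_128);
 */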
2537 
2538 int
2539 ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2540 			 struct rte_eth_rss_reta_entry64 *reta_conf,
2541 			 uint16_t reta_size)
2542 {
2543 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2544 	uint8_t i, j, mask;
2545 	uint32_t reta;
2546 	uint16_t idx, shift;
2547 
2548 	PMD_INIT_FUNC_TRACE();
2549 
2550 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2551 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2552 			"(%d) doesn't match the number the hardware can support "
2553 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2554 		return -EINVAL;
2555 	}
2556 
2557 	for (i = 0; i < reta_size; i += 4) {
2558 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2559 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2560 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2561 		if (!mask)
2562 			continue;
2563 
2564 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2565 		for (j = 0; j < 4; j++) {
2566 			if (RS8(mask, j, 0x1))
2567 				reta_conf[idx].reta[shift + j] =
2568 					(uint16_t)RS32(reta, 8 * j, 0xFF);
2569 		}
2570 	}
2571 
2572 	return 0;
2573 }
2574 
2575 static int
2576 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2577 				uint32_t index, uint32_t pool)
2578 {
2579 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2580 	uint32_t enable_addr = 1;
2581 
2582 	return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2583 			     pool, enable_addr);
2584 }
2585 
2586 static void
2587 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2588 {
2589 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2590 
2591 	ngbe_clear_rar(hw, index);
2592 }
2593 
2594 static int
2595 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2596 {
2597 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2598 
2599 	ngbe_remove_rar(dev, 0);
2600 	ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2601 
2602 	return 0;
2603 }
2604 
2605 static int
2606 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2607 {
2608 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2609 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
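	/* e.g. the standard 1500-byte MTU yields 1500 + 14 + 4 = 1518-byte frames */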
2610 	struct rte_eth_dev_data *dev_data = dev->data;
2611 
2612 	/* If the device is started, refuse an MTU that requires scattered
2613 	 * Rx when that feature has not been enabled beforehand.
2614 	 */
2615 	if (dev_data->dev_started && !dev_data->scattered_rx &&
2616 	    (frame_size + 2 * RTE_VLAN_HLEN >
2617 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2618 		PMD_INIT_LOG(ERR, "Stop port first.");
2619 		return -EINVAL;
2620 	}
2621 
2622 	wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2623 		NGBE_FRMSZ_MAX(frame_size));
2624 
2625 	return 0;
2626 }
2627 
2628 static uint32_t
2629 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2630 {
2631 	uint32_t vector = 0;
2632 
2633 	switch (hw->mac.mc_filter_type) {
2634 	case 0:   /* use bits [47:36] of the address */
2635 		vector = ((uc_addr->addr_bytes[4] >> 4) |
2636 			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
2637 		break;
2638 	case 1:   /* use bits [46:35] of the address */
2639 		vector = ((uc_addr->addr_bytes[4] >> 3) |
2640 			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
2641 		break;
2642 	case 2:   /* use bits [45:34] of the address */
2643 		vector = ((uc_addr->addr_bytes[4] >> 2) |
2644 			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
2645 		break;
2646 	case 3:   /* use bits [43:32] of the address */
2647 		vector = ((uc_addr->addr_bytes[4]) |
2648 			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
2649 		break;
2650 	default:  /* Invalid mc_filter_type */
2651 		break;
2652 	}
2653 
2654 	/* the vector is only 12 bits wide; mask it so the table boundary is not exceeded */
2655 	vector &= 0xFFF;
2656 	return vector;
2657 }
2658 
2659 static int
2660 ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2661 			struct rte_ether_addr *mac_addr, uint8_t on)
2662 {
2663 	uint32_t vector;
2664 	uint32_t uta_idx;
2665 	uint32_t reg_val;
2666 	uint32_t uta_mask;
2667 	uint32_t psrctl;
2668 
2669 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2670 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2671 
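	/* the 12-bit hash selects one bit in the 128 x 32-bit UTA table:
	 * bits [11:5] pick the register, bits [4:0] the bit within it
	 */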
2672 	vector = ngbe_uta_vector(hw, mac_addr);
2673 	uta_idx = (vector >> 5) & 0x7F;
2674 	uta_mask = 0x1UL << (vector & 0x1F);
2675 
2676 	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2677 		return 0;
2678 
2679 	reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2680 	if (on) {
2681 		uta_info->uta_in_use++;
2682 		reg_val |= uta_mask;
2683 		uta_info->uta_shadow[uta_idx] |= uta_mask;
2684 	} else {
2685 		uta_info->uta_in_use--;
2686 		reg_val &= ~uta_mask;
2687 		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2688 	}
2689 
2690 	wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2691 
2692 	psrctl = rd32(hw, NGBE_PSRCTL);
2693 	if (uta_info->uta_in_use > 0)
2694 		psrctl |= NGBE_PSRCTL_UCHFENA;
2695 	else
2696 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2697 
2698 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2699 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2700 	wr32(hw, NGBE_PSRCTL, psrctl);
2701 
2702 	return 0;
2703 }
2704 
2705 static int
2706 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2707 {
2708 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2709 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2710 	uint32_t psrctl;
2711 	int i;
2712 
2713 	if (on) {
2714 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2715 			uta_info->uta_shadow[i] = ~0;
2716 			wr32(hw, NGBE_UCADDRTBL(i), ~0);
2717 		}
2718 	} else {
2719 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2720 			uta_info->uta_shadow[i] = 0;
2721 			wr32(hw, NGBE_UCADDRTBL(i), 0);
2722 		}
2723 	}
2724 
2725 	psrctl = rd32(hw, NGBE_PSRCTL);
2726 	if (on)
2727 		psrctl |= NGBE_PSRCTL_UCHFENA;
2728 	else
2729 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2730 
2731 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2732 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2733 	wr32(hw, NGBE_PSRCTL, psrctl);
2734 
2735 	return 0;
2736 }
2737 
2738 /**
2739  * Set the IVAR registers, mapping interrupt causes to vectors
2740  * @param hw
2741  *  pointer to ngbe_hw struct
2742  * @param direction
2743  *  0 for Rx, 1 for Tx, -1 for other causes
2744  * @param queue
2745  *  queue to map the corresponding interrupt to
2746  * @param msix_vector
2747  *  the vector to map to the corresponding queue
2748  */
2749 void
2750 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2751 		   uint8_t queue, uint8_t msix_vector)
2752 {
2753 	uint32_t tmp, idx;
2754 
2755 	if (direction == -1) {
2756 		/* other causes */
2757 		msix_vector |= NGBE_IVARMISC_VLD;
2758 		idx = 0;
2759 		tmp = rd32(hw, NGBE_IVARMISC);
2760 		tmp &= ~(0xFF << idx);
2761 		tmp |= (msix_vector << idx);
2762 		wr32(hw, NGBE_IVARMISC, tmp);
2763 	} else {
2764 		/* rx or tx causes */
2765 		/* Workaround for ICR lost */
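		/* each 32-bit IVAR register holds four 8-bit entries and
		 * covers two queues: Rx/Tx of the even queue in the low
		 * 16 bits, Rx/Tx of the odd queue in the high 16 bits
		 */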
2766 		idx = ((16 * (queue & 1)) + (8 * direction));
2767 		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2768 		tmp &= ~(0xFF << idx);
2769 		tmp |= (msix_vector << idx);
2770 		wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2771 	}
2772 }
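/*
 * A minimal usage sketch (illustrative vector and queue values): route Rx
 * queue 0 to MSI-X vector 2 and the "other causes" interrupts to vector 0.
 * The queue argument is ignored when direction is -1.
 *
 *	ngbe_set_ivar_map(hw, 0, 0, 2);
 *	ngbe_set_ivar_map(hw, -1, 0, 0);
 */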
2773 
2774 /**
2775  * Sets up the hardware to properly generate MSI-X interrupts
2776  * @param dev
2777  *  pointer to the rte_eth_dev structure
2778  */
2779 static void
2780 ngbe_configure_msix(struct rte_eth_dev *dev)
2781 {
2782 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2783 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2784 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2785 	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2786 	uint32_t vec = NGBE_MISC_VEC_ID;
2787 	uint32_t gpie;
2788 
2789 	/*
2790 	 * Don't configure the MSI-X registers if no mapping has been done
2791 	 * between interrupt vectors and event fds. However, if MSI-X has
2792 	 * already been enabled, auto clean, auto mask and throttling still
2793 	 * need to be configured.
2794 	 */
2795 	gpie = rd32(hw, NGBE_GPIE);
2796 	if (!rte_intr_dp_is_en(intr_handle) &&
2797 	    !(gpie & NGBE_GPIE_MSIX))
2798 		return;
2799 
2800 	if (rte_intr_allow_others(intr_handle)) {
2801 		base = NGBE_RX_VEC_START;
2802 		vec = base;
2803 	}
2804 
2805 	/* setup GPIE for MSI-X mode */
2806 	gpie = rd32(hw, NGBE_GPIE);
2807 	gpie |= NGBE_GPIE_MSIX;
2808 	wr32(hw, NGBE_GPIE, gpie);
2809 
2810 	/* Populate the IVAR table and set the ITR values to the
2811 	 * corresponding register.
2812 	 */
2813 	if (rte_intr_dp_is_en(intr_handle)) {
2814 		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2815 			queue_id++) {
2816 			/* by default, 1:1 mapping */
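			/* if there are fewer event fds than Rx queues, the
			 * remaining queues share the last allocated vector
			 */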
2817 			ngbe_set_ivar_map(hw, 0, queue_id, vec);
2818 			rte_intr_vec_list_index_set(intr_handle,
2819 							   queue_id, vec);
2820 			if (vec < base + rte_intr_nb_efd_get(intr_handle)
2821 			    - 1)
2822 				vec++;
2823 		}
2824 
2825 		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2826 	}
2827 	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2828 			NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2829 			| NGBE_ITR_WRDSA);
2830 }
2831 
2832 static u8 *
2833 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2834 			u8 **mc_addr_ptr, u32 *vmdq)
2835 {
2836 	u8 *mc_addr;
2837 
2838 	*vmdq = 0;
2839 	mc_addr = *mc_addr_ptr;
2840 	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2841 	return mc_addr;
2842 }
2843 
2844 int
2845 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2846 			  struct rte_ether_addr *mc_addr_set,
2847 			  uint32_t nb_mc_addr)
2848 {
2849 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2850 	u8 *mc_addr_list;
2851 
2852 	mc_addr_list = (u8 *)mc_addr_set;
2853 	return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2854 					 ngbe_dev_addr_list_itr, TRUE);
2855 }
2856 
2857 static uint64_t
2858 ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
2859 {
2860 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2861 	uint64_t systime_cycles;
2862 
2863 	systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL);
2864 	systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32;
2865 
2866 	return systime_cycles;
2867 }
2868 
2869 static uint64_t
2870 ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2871 {
2872 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2873 	uint64_t rx_tstamp_cycles;
2874 
2875 	/* TSRXSTMPL holds the low 32 bits and TSRXSTMPH the high 32 bits. */
2876 	rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL);
2877 	rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32;
2878 
2879 	return rx_tstamp_cycles;
2880 }
2881 
2882 static uint64_t
2883 ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2884 {
2885 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2886 	uint64_t tx_tstamp_cycles;
2887 
2888 	/* TSTXSTMPL holds the low 32 bits and TSTXSTMPH the high 32 bits. */
2889 	tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL);
2890 	tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32;
2891 
2892 	return tx_tstamp_cycles;
2893 }
2894 
2895 static void
2896 ngbe_start_timecounters(struct rte_eth_dev *dev)
2897 {
2898 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2899 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2900 	uint32_t incval = 0;
2901 	uint32_t shift = 0;
2902 
2903 	incval = NGBE_INCVAL_1GB;
2904 	shift = NGBE_INCVAL_SHIFT_1GB;
2905 
2906 	wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval));
2907 
2908 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
2909 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2910 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2911 
2912 	adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2913 	adapter->systime_tc.cc_shift = shift;
2914 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
2915 
2916 	adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2917 	adapter->rx_tstamp_tc.cc_shift = shift;
2918 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2919 
2920 	adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2921 	adapter->tx_tstamp_tc.cc_shift = shift;
2922 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2923 }
2924 
2925 static int
2926 ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
2927 {
2928 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2929 
2930 	adapter->systime_tc.nsec += delta;
2931 	adapter->rx_tstamp_tc.nsec += delta;
2932 	adapter->tx_tstamp_tc.nsec += delta;
2933 
2934 	return 0;
2935 }
2936 
2937 static int
2938 ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
2939 {
2940 	uint64_t ns;
2941 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2942 
2943 	ns = rte_timespec_to_ns(ts);
2944 	/* Set the timecounters to a new value. */
2945 	adapter->systime_tc.nsec = ns;
2946 	adapter->rx_tstamp_tc.nsec = ns;
2947 	adapter->tx_tstamp_tc.nsec = ns;
2948 
2949 	return 0;
2950 }
2951 
2952 static int
2953 ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
2954 {
2955 	uint64_t ns, systime_cycles;
2956 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2957 
2958 	systime_cycles = ngbe_read_systime_cyclecounter(dev);
2959 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
2960 	*ts = rte_ns_to_timespec(ns);
2961 
2962 	return 0;
2963 }
2964 
2965 static int
2966 ngbe_timesync_enable(struct rte_eth_dev *dev)
2967 {
2968 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2969 	uint32_t tsync_ctl;
2970 
2971 	/* Stop the timesync system time. */
2972 	wr32(hw, NGBE_TSTIMEINC, 0x0);
2973 	/* Reset the timesync system time value. */
2974 	wr32(hw, NGBE_TSTIMEL, 0x0);
2975 	wr32(hw, NGBE_TSTIMEH, 0x0);
2976 
2977 	ngbe_start_timecounters(dev);
2978 
2979 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2980 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588),
2981 		RTE_ETHER_TYPE_1588 | NGBE_ETFLT_ENA | NGBE_ETFLT_1588);
2982 
2983 	/* Enable timestamping of received PTP packets. */
2984 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2985 	tsync_ctl |= NGBE_TSRXCTL_ENA;
2986 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2987 
2988 	/* Enable timestamping of transmitted PTP packets. */
2989 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2990 	tsync_ctl |= NGBE_TSTXCTL_ENA;
2991 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2992 
2993 	ngbe_flush(hw);
2994 
2995 	return 0;
2996 }
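/*
 * A minimal usage sketch (hypothetical application code; port_id is
 * illustrative): enable PTP timestamping through the generic API and then
 * poll for an Rx timestamp.
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	...
 *	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *		printf("Rx tstamp: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 */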
2997 
2998 static int
2999 ngbe_timesync_disable(struct rte_eth_dev *dev)
3000 {
3001 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3002 	uint32_t tsync_ctl;
3003 
3004 	/* Disable timestamping of transmitted PTP packets. */
3005 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
3006 	tsync_ctl &= ~NGBE_TSTXCTL_ENA;
3007 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
3008 
3009 	/* Disable timestamping of received PTP packets. */
3010 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
3011 	tsync_ctl &= ~NGBE_TSRXCTL_ENA;
3012 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
3013 
3014 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3015 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);
3016 
3017 	/* Stop incrementing the System Time registers. */
3018 	wr32(hw, NGBE_TSTIMEINC, 0);
3019 
3020 	return 0;
3021 }
3022 
3023 static int
3024 ngbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3025 				 struct timespec *timestamp,
3026 				 uint32_t flags __rte_unused)
3027 {
3028 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3029 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
3030 	uint32_t tsync_rxctl;
3031 	uint64_t rx_tstamp_cycles;
3032 	uint64_t ns;
3033 
3034 	tsync_rxctl = rd32(hw, NGBE_TSRXCTL);
3035 	if ((tsync_rxctl & NGBE_TSRXCTL_VLD) == 0)
3036 		return -EINVAL;
3037 
3038 	rx_tstamp_cycles = ngbe_read_rx_tstamp_cyclecounter(dev);
3039 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
3040 	*timestamp = rte_ns_to_timespec(ns);
3041 
3042 	return  0;
3043 }
3044 
3045 static int
3046 ngbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3047 				 struct timespec *timestamp)
3048 {
3049 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3050 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
3051 	uint32_t tsync_txctl;
3052 	uint64_t tx_tstamp_cycles;
3053 	uint64_t ns;
3054 
3055 	tsync_txctl = rd32(hw, NGBE_TSTXCTL);
3056 	if ((tsync_txctl & NGBE_TSTXCTL_VLD) == 0)
3057 		return -EINVAL;
3058 
3059 	tx_tstamp_cycles = ngbe_read_tx_tstamp_cyclecounter(dev);
3060 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
3061 	*timestamp = rte_ns_to_timespec(ns);
3062 
3063 	return 0;
3064 }
3065 
3066 static int
3067 ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
3068 {
3069 	int count = 0;
3070 	int g_ind = 0;
3071 	const struct reg_info *reg_group;
3072 	const struct reg_info **reg_set = ngbe_regs_others;
3073 
3074 	while ((reg_group = reg_set[g_ind++]))
3075 		count += ngbe_regs_group_count(reg_group);
3076 
3077 	return count;
3078 }
3079 
3080 static int
3081 ngbe_get_regs(struct rte_eth_dev *dev,
3082 	      struct rte_dev_reg_info *regs)
3083 {
3084 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3085 	uint32_t *data = regs->data;
3086 	int g_ind = 0;
3087 	int count = 0;
3088 	const struct reg_info *reg_group;
3089 	const struct reg_info **reg_set = ngbe_regs_others;
3090 
3091 	if (data == NULL) {
3092 		regs->length = ngbe_get_reg_length(dev);
3093 		regs->width = sizeof(uint32_t);
3094 		return 0;
3095 	}
3096 
3097 	/* Support only full register dump */
3098 	if (regs->length == 0 ||
3099 	    regs->length == (uint32_t)ngbe_get_reg_length(dev)) {
3100 		regs->version = hw->mac.type << 24 |
3101 				hw->revision_id << 16 |
3102 				hw->device_id;
3103 		while ((reg_group = reg_set[g_ind++]))
3104 			count += ngbe_read_regs_group(dev, &data[count],
3105 						      reg_group);
3106 		return 0;
3107 	}
3108 
3109 	return -ENOTSUP;
3110 }
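/*
 * A minimal usage sketch (hypothetical application code; port_id is
 * illustrative): the generic API is called twice; with data == NULL,
 * ngbe_get_regs() above only reports the length and width of the dump.
 *
 *	struct rte_dev_reg_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	rte_eth_dev_get_reg_info(port_id, &info);
 *	info.data = malloc(info.length * info.width);
 *	rte_eth_dev_get_reg_info(port_id, &info);
 */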
3111 
3112 static int
3113 ngbe_get_eeprom_length(struct rte_eth_dev *dev)
3114 {
3115 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3116 
3117 	/* Return unit is byte count */
3118 	return hw->rom.word_size * 2;
3119 }
3120 
3121 static int
3122 ngbe_get_eeprom(struct rte_eth_dev *dev,
3123 		struct rte_dev_eeprom_info *in_eeprom)
3124 {
3125 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3126 	struct ngbe_rom_info *eeprom = &hw->rom;
3127 	uint16_t *data = in_eeprom->data;
3128 	int first, length;
3129 
3130 	first = in_eeprom->offset >> 1;
3131 	length = in_eeprom->length >> 1;
3132 	if (first > hw->rom.word_size ||
3133 	    ((first + length) > hw->rom.word_size))
3134 		return -EINVAL;
3135 
3136 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3137 
3138 	return eeprom->readw_buffer(hw, first, length, data);
3139 }
3140 
3141 static int
3142 ngbe_set_eeprom(struct rte_eth_dev *dev,
3143 		struct rte_dev_eeprom_info *in_eeprom)
3144 {
3145 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3146 	struct ngbe_rom_info *eeprom = &hw->rom;
3147 	uint16_t *data = in_eeprom->data;
3148 	int first, length;
3149 
3150 	first = in_eeprom->offset >> 1;
3151 	length = in_eeprom->length >> 1;
3152 	if (first > hw->rom.word_size ||
3153 	    ((first + length) > hw->rom.word_size))
3154 		return -EINVAL;
3155 
3156 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3157 
3158 	return eeprom->writew_buffer(hw,  first, length, data);
3159 }
3160 
3161 static const struct eth_dev_ops ngbe_eth_dev_ops = {
3162 	.dev_configure              = ngbe_dev_configure,
3163 	.dev_infos_get              = ngbe_dev_info_get,
3164 	.dev_start                  = ngbe_dev_start,
3165 	.dev_stop                   = ngbe_dev_stop,
3166 	.dev_set_link_up            = ngbe_dev_set_link_up,
3167 	.dev_set_link_down          = ngbe_dev_set_link_down,
3168 	.dev_close                  = ngbe_dev_close,
3169 	.dev_reset                  = ngbe_dev_reset,
3170 	.promiscuous_enable         = ngbe_dev_promiscuous_enable,
3171 	.promiscuous_disable        = ngbe_dev_promiscuous_disable,
3172 	.allmulticast_enable        = ngbe_dev_allmulticast_enable,
3173 	.allmulticast_disable       = ngbe_dev_allmulticast_disable,
3174 	.link_update                = ngbe_dev_link_update,
3175 	.stats_get                  = ngbe_dev_stats_get,
3176 	.xstats_get                 = ngbe_dev_xstats_get,
3177 	.xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
3178 	.stats_reset                = ngbe_dev_stats_reset,
3179 	.xstats_reset               = ngbe_dev_xstats_reset,
3180 	.xstats_get_names           = ngbe_dev_xstats_get_names,
3181 	.xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
3182 	.fw_version_get             = ngbe_fw_version_get,
3183 	.dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
3184 	.mtu_set                    = ngbe_dev_mtu_set,
3185 	.vlan_filter_set            = ngbe_vlan_filter_set,
3186 	.vlan_tpid_set              = ngbe_vlan_tpid_set,
3187 	.vlan_offload_set           = ngbe_vlan_offload_set,
3188 	.vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
3189 	.rx_queue_start	            = ngbe_dev_rx_queue_start,
3190 	.rx_queue_stop              = ngbe_dev_rx_queue_stop,
3191 	.tx_queue_start	            = ngbe_dev_tx_queue_start,
3192 	.tx_queue_stop              = ngbe_dev_tx_queue_stop,
3193 	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
3194 	.rx_queue_release           = ngbe_dev_rx_queue_release,
3195 	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
3196 	.tx_queue_release           = ngbe_dev_tx_queue_release,
3197 	.dev_led_on                 = ngbe_dev_led_on,
3198 	.dev_led_off                = ngbe_dev_led_off,
3199 	.flow_ctrl_get              = ngbe_flow_ctrl_get,
3200 	.flow_ctrl_set              = ngbe_flow_ctrl_set,
3201 	.mac_addr_add               = ngbe_add_rar,
3202 	.mac_addr_remove            = ngbe_remove_rar,
3203 	.mac_addr_set               = ngbe_set_default_mac_addr,
3204 	.uc_hash_table_set          = ngbe_uc_hash_table_set,
3205 	.uc_all_hash_table_set      = ngbe_uc_all_hash_table_set,
3206 	.reta_update                = ngbe_dev_rss_reta_update,
3207 	.reta_query                 = ngbe_dev_rss_reta_query,
3208 	.rss_hash_update            = ngbe_dev_rss_hash_update,
3209 	.rss_hash_conf_get          = ngbe_dev_rss_hash_conf_get,
3210 	.set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
3211 	.rxq_info_get               = ngbe_rxq_info_get,
3212 	.txq_info_get               = ngbe_txq_info_get,
3213 	.rx_burst_mode_get          = ngbe_rx_burst_mode_get,
3214 	.tx_burst_mode_get          = ngbe_tx_burst_mode_get,
3215 	.timesync_enable            = ngbe_timesync_enable,
3216 	.timesync_disable           = ngbe_timesync_disable,
3217 	.timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp,
3218 	.timesync_read_tx_timestamp = ngbe_timesync_read_tx_timestamp,
3219 	.get_reg                    = ngbe_get_regs,
3220 	.get_eeprom_length          = ngbe_get_eeprom_length,
3221 	.get_eeprom                 = ngbe_get_eeprom,
3222 	.set_eeprom                 = ngbe_set_eeprom,
3223 	.timesync_adjust_time       = ngbe_timesync_adjust_time,
3224 	.timesync_read_time         = ngbe_timesync_read_time,
3225 	.timesync_write_time        = ngbe_timesync_write_time,
3226 	.tx_done_cleanup            = ngbe_dev_tx_done_cleanup,
3227 };
3228 
3229 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
3230 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
3231 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
3232 
3233 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
3234 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
3235 
3236 #ifdef RTE_ETHDEV_DEBUG_RX
3237 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
3238 #endif
3239 #ifdef RTE_ETHDEV_DEBUG_TX
3240 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
3241 #endif
3242