xref: /dpdk/drivers/net/ngbe/ngbe_ethdev.c (revision 665b49c51639a10c553433bc2bcd85c7331c631e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3  * Copyright(c) 2010-2017 Intel Corporation
4  */
5 
6 #include <errno.h>
7 #include <rte_common.h>
8 #include <ethdev_pci.h>
9 
10 #include <rte_alarm.h>
11 
12 #include "ngbe_logs.h"
13 #include "ngbe.h"
14 #include "ngbe_ethdev.h"
15 #include "ngbe_rxtx.h"
16 #include "ngbe_regs_group.h"
17 
18 static const struct reg_info ngbe_regs_general[] = {
19 	{NGBE_RST, 1, 1, "NGBE_RST"},
20 	{NGBE_STAT, 1, 1, "NGBE_STAT"},
21 	{NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
22 	{NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
23 	{NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
24 	{NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
25 	{0, 0, 0, ""}
26 };
27 
28 static const struct reg_info ngbe_regs_nvm[] = {
29 	{0, 0, 0, ""}
30 };
31 
32 static const struct reg_info ngbe_regs_interrupt[] = {
33 	{0, 0, 0, ""}
34 };
35 
36 static const struct reg_info ngbe_regs_fctl_others[] = {
37 	{0, 0, 0, ""}
38 };
39 
40 static const struct reg_info ngbe_regs_rxdma[] = {
41 	{0, 0, 0, ""}
42 };
43 
44 static const struct reg_info ngbe_regs_rx[] = {
45 	{0, 0, 0, ""}
46 };
47 
48 static const struct reg_info ngbe_regs_tx[] = {
49 	{0, 0, 0, ""}
50 };
51 
52 static const struct reg_info ngbe_regs_wakeup[] = {
53 	{0, 0, 0, ""}
54 };
55 
56 static const struct reg_info ngbe_regs_mac[] = {
57 	{0, 0, 0, ""}
58 };
59 
60 static const struct reg_info ngbe_regs_diagnostic[] = {
61 	{0, 0, 0, ""}
62 };
63 
64 /* PF registers */
65 static const struct reg_info *ngbe_regs_others[] = {
66 				ngbe_regs_general,
67 				ngbe_regs_nvm,
68 				ngbe_regs_interrupt,
69 				ngbe_regs_fctl_others,
70 				ngbe_regs_rxdma,
71 				ngbe_regs_rx,
72 				ngbe_regs_tx,
73 				ngbe_regs_wakeup,
74 				ngbe_regs_mac,
75 				ngbe_regs_diagnostic,
76 				NULL};
77 
78 static int ngbe_dev_close(struct rte_eth_dev *dev);
79 static int ngbe_dev_link_update(struct rte_eth_dev *dev,
80 				int wait_to_complete);
81 static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
82 static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
83 static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
84 					uint16_t queue);
85 
86 static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
87 static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
88 static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
89 static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
90 static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
91 static void ngbe_dev_interrupt_handler(void *param);
92 static void ngbe_configure_msix(struct rte_eth_dev *dev);
93 
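/*
 * Per-queue VLAN-strip state is tracked in a bitmap. Assuming 32-bit
 * bitmap words (sizeof((h)->bitmap[0]) * NBBY == 32), queue q maps to
 * word q / 32 and bit q % 32; e.g. queue 35 lands in word 1, bit 3.
 */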
94 #define NGBE_SET_HWSTRIP(h, q) do {\
95 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
96 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
97 		(h)->bitmap[idx] |= 1u << bit;\
98 	} while (0)
99 
100 #define NGBE_CLEAR_HWSTRIP(h, q) do {\
101 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
102 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
103 		(h)->bitmap[idx] &= ~(1u << bit);\
104 	} while (0)
105 
106 #define NGBE_GET_HWSTRIP(h, q, r) do {\
107 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
108 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
109 		(r) = ((h)->bitmap[idx] >> bit) & 1;\
110 	} while (0)
111 
112 /*
113  * The set of PCI devices this driver supports
114  */
115 static const struct rte_pci_id pci_id_ngbe_map[] = {
116 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
117 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
118 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
119 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
120 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
121 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
122 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
123 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
124 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
125 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
126 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
127 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
128 	{ .vendor_id = 0, /* sentinel */ },
129 };
130 
131 static const struct rte_eth_desc_lim rx_desc_lim = {
132 	.nb_max = NGBE_RING_DESC_MAX,
133 	.nb_min = NGBE_RING_DESC_MIN,
134 	.nb_align = NGBE_RXD_ALIGN,
135 };
136 
137 static const struct rte_eth_desc_lim tx_desc_lim = {
138 	.nb_max = NGBE_RING_DESC_MAX,
139 	.nb_min = NGBE_RING_DESC_MIN,
140 	.nb_align = NGBE_TXD_ALIGN,
141 	.nb_seg_max = NGBE_TX_MAX_SEG,
142 	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
143 };
144 
145 static const struct eth_dev_ops ngbe_eth_dev_ops;
146 
147 #define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
148 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
149 static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
150 	/* MNG RxTx */
151 	HW_XSTAT(mng_bmc2host_packets),
152 	HW_XSTAT(mng_host2bmc_packets),
153 	/* Basic RxTx */
154 	HW_XSTAT(rx_packets),
155 	HW_XSTAT(tx_packets),
156 	HW_XSTAT(rx_bytes),
157 	HW_XSTAT(tx_bytes),
158 	HW_XSTAT(rx_total_bytes),
159 	HW_XSTAT(rx_total_packets),
160 	HW_XSTAT(tx_total_packets),
161 	HW_XSTAT(rx_total_missed_packets),
162 	HW_XSTAT(rx_broadcast_packets),
163 	HW_XSTAT(rx_multicast_packets),
164 	HW_XSTAT(rx_management_packets),
165 	HW_XSTAT(tx_management_packets),
166 	HW_XSTAT(rx_management_dropped),
167 	HW_XSTAT(rx_dma_drop),
168 	HW_XSTAT(tx_dma_drop),
169 	HW_XSTAT(tx_secdrp_packets),
170 
171 	/* Basic Error */
172 	HW_XSTAT(rx_crc_errors),
173 	HW_XSTAT(rx_illegal_byte_errors),
174 	HW_XSTAT(rx_error_bytes),
175 	HW_XSTAT(rx_mac_short_packet_dropped),
176 	HW_XSTAT(rx_length_errors),
177 	HW_XSTAT(rx_undersize_errors),
178 	HW_XSTAT(rx_fragment_errors),
179 	HW_XSTAT(rx_oversize_cnt),
180 	HW_XSTAT(rx_jabber_errors),
181 	HW_XSTAT(rx_l3_l4_xsum_error),
182 	HW_XSTAT(mac_local_errors),
183 	HW_XSTAT(mac_remote_errors),
184 
185 	/* PB Stats */
186 	HW_XSTAT(rx_up_dropped),
187 	HW_XSTAT(rdb_pkt_cnt),
188 	HW_XSTAT(rdb_repli_cnt),
189 	HW_XSTAT(rdb_drp_cnt),
190 
191 	/* MACSEC */
192 	HW_XSTAT(tx_macsec_pkts_untagged),
193 	HW_XSTAT(tx_macsec_pkts_encrypted),
194 	HW_XSTAT(tx_macsec_pkts_protected),
195 	HW_XSTAT(tx_macsec_octets_encrypted),
196 	HW_XSTAT(tx_macsec_octets_protected),
197 	HW_XSTAT(rx_macsec_pkts_untagged),
198 	HW_XSTAT(rx_macsec_pkts_badtag),
199 	HW_XSTAT(rx_macsec_pkts_nosci),
200 	HW_XSTAT(rx_macsec_pkts_unknownsci),
201 	HW_XSTAT(rx_macsec_octets_decrypted),
202 	HW_XSTAT(rx_macsec_octets_validated),
203 	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
204 	HW_XSTAT(rx_macsec_sc_pkts_delayed),
205 	HW_XSTAT(rx_macsec_sc_pkts_late),
206 	HW_XSTAT(rx_macsec_sa_pkts_ok),
207 	HW_XSTAT(rx_macsec_sa_pkts_invalid),
208 	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
209 	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
210 	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
211 
212 	/* MAC RxTx */
213 	HW_XSTAT(rx_size_64_packets),
214 	HW_XSTAT(rx_size_65_to_127_packets),
215 	HW_XSTAT(rx_size_128_to_255_packets),
216 	HW_XSTAT(rx_size_256_to_511_packets),
217 	HW_XSTAT(rx_size_512_to_1023_packets),
218 	HW_XSTAT(rx_size_1024_to_max_packets),
219 	HW_XSTAT(tx_size_64_packets),
220 	HW_XSTAT(tx_size_65_to_127_packets),
221 	HW_XSTAT(tx_size_128_to_255_packets),
222 	HW_XSTAT(tx_size_256_to_511_packets),
223 	HW_XSTAT(tx_size_512_to_1023_packets),
224 	HW_XSTAT(tx_size_1024_to_max_packets),
225 
226 	/* Flow Control */
227 	HW_XSTAT(tx_xon_packets),
228 	HW_XSTAT(rx_xon_packets),
229 	HW_XSTAT(tx_xoff_packets),
230 	HW_XSTAT(rx_xoff_packets),
231 
232 	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
233 	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
234 	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
235 	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
236 };
237 
238 #define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
239 			   sizeof(rte_ngbe_stats_strings[0]))
240 
241 /* Per-queue statistics */
242 #define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
243 static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
244 	QP_XSTAT(rx_qp_packets),
245 	QP_XSTAT(tx_qp_packets),
246 	QP_XSTAT(rx_qp_bytes),
247 	QP_XSTAT(tx_qp_bytes),
248 	QP_XSTAT(rx_qp_mc_packets),
249 };
250 
251 #define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
252 			   sizeof(rte_ngbe_qp_strings[0]))
253 
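/*
 * Reset the PF MAC, then set the "PF Reset Done" bit in PORTCTL so that
 * PF/VF mailbox operations can work. A missing SFP module
 * (NGBE_ERR_SFP_NOT_PRESENT) is deliberately treated as non-fatal.
 */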
254 static inline int32_t
255 ngbe_pf_reset_hw(struct ngbe_hw *hw)
256 {
257 	uint32_t ctrl_ext;
258 	int32_t status;
259 
260 	status = hw->mac.reset_hw(hw);
261 
262 	ctrl_ext = rd32(hw, NGBE_PORTCTL);
263 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
264 	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
265 	wr32(hw, NGBE_PORTCTL, ctrl_ext);
266 	ngbe_flush(hw);
267 
268 	if (status == NGBE_ERR_SFP_NOT_PRESENT)
269 		status = 0;
270 	return status;
271 }
272 
273 static inline void
274 ngbe_enable_intr(struct rte_eth_dev *dev)
275 {
276 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
277 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
278 
279 	wr32(hw, NGBE_IENMISC, intr->mask_misc);
280 	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
281 	ngbe_flush(hw);
282 }
283 
284 static void
285 ngbe_disable_intr(struct ngbe_hw *hw)
286 {
287 	PMD_INIT_FUNC_TRACE();
288 
289 	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
290 	ngbe_flush(hw);
291 }
292 
293 /*
294  * Ensure that all locks are released before first NVM or PHY access
295  */
296 static void
297 ngbe_swfw_lock_reset(struct ngbe_hw *hw)
298 {
299 	uint16_t mask;
300 
301 	/*
302 	 * These locks are trickier since they are common to all ports; but
303 	 * swfw_sync retries for long enough (1s) to be almost sure that, if
304 	 * the lock cannot be taken, it is due to an improperly held
305 	 * semaphore.
306 	 */
307 	mask = NGBE_MNGSEM_SWPHY |
308 	       NGBE_MNGSEM_SWMBX |
309 	       NGBE_MNGSEM_SWFLASH;
310 	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
311 		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
312 
313 	hw->mac.release_swfw_sync(hw, mask);
314 }
315 
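/*
 * PF device init: set up the ethdev ops and burst functions, resolve the
 * subsystem ID (from PCI config or, failing that, from flash), bring up
 * the shared base-driver code, EEPROM and MAC, allocate the MAC address
 * tables, and register/enable the misc interrupt handler. Secondary
 * processes only attach the proper Rx/Tx burst functions and return.
 */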
316 static int
317 eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
318 {
319 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
320 	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
321 	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
322 	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
323 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
324 	const struct rte_memzone *mz;
325 	uint32_t ctrl_ext;
326 	u32 led_conf = 0;
327 	int err, ret;
328 
329 	PMD_INIT_FUNC_TRACE();
330 
331 	eth_dev->dev_ops = &ngbe_eth_dev_ops;
332 	eth_dev->rx_queue_count       = ngbe_dev_rx_queue_count;
333 	eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
334 	eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
335 	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
336 	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
337 	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;
338 
339 	/*
340 	 * For secondary processes, we don't initialize any further, as the primary
341 	 * has already done this work. Only check we don't need a different
342 	 * Rx and Tx function.
343 	 */
344 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
345 		struct ngbe_tx_queue *txq;
346 		/* Tx queue function in primary is set by the last queue
347 		 * initialized; Tx queues may not have been initialized yet
348 		 */
349 		if (eth_dev->data->tx_queues) {
350 			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
351 			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
352 			ngbe_set_tx_function(eth_dev, txq);
353 		} else {
354 			/* Use default Tx function if we get here */
355 			PMD_INIT_LOG(NOTICE,
356 				"No Tx queues configured yet. Using default Tx function.");
357 		}
358 
359 		ngbe_set_rx_function(eth_dev);
360 
361 		return 0;
362 	}
363 
364 	rte_eth_copy_pci_info(eth_dev, pci_dev);
365 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
366 
367 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
368 
369 	/* Vendor and Device ID need to be set before init of shared code */
370 	hw->back = pci_dev;
371 	hw->device_id = pci_dev->id.device_id;
372 	hw->vendor_id = pci_dev->id.vendor_id;
373 	if (pci_dev->id.subsystem_vendor_id == PCI_VENDOR_ID_WANGXUN) {
374 		hw->sub_system_id = pci_dev->id.subsystem_device_id;
375 	} else {
376 		u32 ssid;
377 
378 		ssid = ngbe_flash_read_dword(hw, 0xFFFDC);
379 		if (ssid == 0x1) {
380 			PMD_INIT_LOG(ERR,
381 				"Read of internal subsystem device id failed");
382 			return -ENODEV;
383 		}
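		/* The ID read back from flash appears byte-swapped relative to
		 * the PCI subsystem ID, so swap the two bytes here (assumption
		 * based on the shift/OR pattern below).
		 */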
384 		hw->sub_system_id = (u16)ssid >> 8 | (u16)ssid << 8;
385 	}
386 	ngbe_map_device_id(hw);
387 
388 	/* Reserve memory for interrupt status block */
389 	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
390 		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
391 	if (mz == NULL)
392 		return -ENOMEM;
393 
394 	hw->isb_dma = TMZ_PADDR(mz);
395 	hw->isb_mem = TMZ_VADDR(mz);
396 
397 	/* Initialize the shared code (base driver) */
398 	err = ngbe_init_shared_code(hw);
399 	if (err != 0) {
400 		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
401 		return -EIO;
402 	}
403 
404 	/* Unlock any pending hardware semaphore */
405 	ngbe_swfw_lock_reset(hw);
406 
407 	/* Set default Hardware Flow Control settings */
408 	hw->fc.requested_mode = ngbe_fc_full;
409 	hw->fc.current_mode = ngbe_fc_full;
410 	hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
411 	hw->fc.low_water = NGBE_FC_XON_LOTH;
412 	hw->fc.high_water = NGBE_FC_XOFF_HITH;
413 	hw->fc.send_xon = 1;
414 
415 	err = hw->rom.init_params(hw);
416 	if (err != 0) {
417 		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
418 		return -EIO;
419 	}
420 
421 	/* Make sure we have a good EEPROM before we read from it */
422 	err = hw->rom.validate_checksum(hw, NULL);
423 	if (err != 0) {
424 		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
425 		return -EIO;
426 	}
427 
428 	err = hw->phy.led_oem_chk(hw, &led_conf);
429 	if (err == 0)
430 		hw->led_conf = led_conf;
431 	else
432 		hw->led_conf = 0xFFFF;
433 
434 	err = hw->mac.init_hw(hw);
435 	if (err != 0) {
436 		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
437 		return -EIO;
438 	}
439 
440 	/* Reset the hw statistics */
441 	ngbe_dev_stats_reset(eth_dev);
442 
443 	/* disable interrupt */
444 	ngbe_disable_intr(hw);
445 
446 	/* Allocate memory for storing MAC addresses */
447 	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
448 					       hw->mac.num_rar_entries, 0);
449 	if (eth_dev->data->mac_addrs == NULL) {
450 		PMD_INIT_LOG(ERR,
451 			     "Failed to allocate %u bytes needed to store MAC addresses",
452 			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
453 		return -ENOMEM;
454 	}
455 
456 	/* Copy the permanent MAC address */
457 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
458 			&eth_dev->data->mac_addrs[0]);
459 
460 	/* Allocate memory for storing hash filter MAC addresses */
461 	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
462 			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
463 	if (eth_dev->data->hash_mac_addrs == NULL) {
464 		PMD_INIT_LOG(ERR,
465 			     "Failed to allocate %d bytes needed to store MAC addresses",
466 			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
467 		rte_free(eth_dev->data->mac_addrs);
468 		eth_dev->data->mac_addrs = NULL;
469 		return -ENOMEM;
470 	}
471 
472 	/* initialize the vfta */
473 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
474 
475 	/* initialize the hw strip bitmap */
476 	memset(hwstrip, 0, sizeof(*hwstrip));
477 
478 	/* initialize PF if max_vfs not zero */
479 	ret = ngbe_pf_host_init(eth_dev);
480 	if (ret) {
481 		rte_free(eth_dev->data->mac_addrs);
482 		eth_dev->data->mac_addrs = NULL;
483 		rte_free(eth_dev->data->hash_mac_addrs);
484 		eth_dev->data->hash_mac_addrs = NULL;
485 		return ret;
486 	}
487 
488 	ctrl_ext = rd32(hw, NGBE_PORTCTL);
489 	/* let hardware know driver is loaded */
490 	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
491 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
492 	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
493 	wr32(hw, NGBE_PORTCTL, ctrl_ext);
494 	ngbe_flush(hw);
495 
496 	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
497 			(int)hw->mac.type, (int)hw->phy.type);
498 
499 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
500 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
501 		     pci_dev->id.device_id);
502 
503 	rte_intr_callback_register(intr_handle,
504 				   ngbe_dev_interrupt_handler, eth_dev);
505 
506 	/* enable uio/vfio intr/eventfd mapping */
507 	rte_intr_enable(intr_handle);
508 
509 	/* enable support intr */
510 	ngbe_enable_intr(eth_dev);
511 
512 	return 0;
513 }
514 
515 static int
516 eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
517 {
518 	PMD_INIT_FUNC_TRACE();
519 
520 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
521 		return 0;
522 
523 	ngbe_dev_close(eth_dev);
524 
525 	return 0;
526 }
527 
528 static int
529 eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
530 		struct rte_pci_device *pci_dev)
531 {
532 	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
533 			sizeof(struct ngbe_adapter),
534 			eth_dev_pci_specific_init, pci_dev,
535 			eth_ngbe_dev_init, NULL);
536 }
537 
538 static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
539 {
540 	struct rte_eth_dev *ethdev;
541 
542 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
543 	if (ethdev == NULL)
544 		return 0;
545 
546 	return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
547 }
548 
549 static struct rte_pci_driver rte_ngbe_pmd = {
550 	.id_table = pci_id_ngbe_map,
551 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
552 		     RTE_PCI_DRV_INTR_LSC,
553 	.probe = eth_ngbe_pci_probe,
554 	.remove = eth_ngbe_pci_remove,
555 };
556 
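/*
 * The 4096-entry VLAN filter table is spread across 128 32-bit registers:
 * bits 11:5 of the VLAN ID pick the register (vid_idx) and bits 4:0 pick
 * the bit within it (vid_bit); e.g. VLAN 100 is word 3, bit 4.
 */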
557 static int
558 ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
559 {
560 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
561 	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
562 	uint32_t vfta;
563 	uint32_t vid_idx;
564 	uint32_t vid_bit;
565 
566 	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
567 	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
568 	vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
569 	if (on)
570 		vfta |= vid_bit;
571 	else
572 		vfta &= ~vid_bit;
573 	wr32(hw, NGBE_VLANTBL(vid_idx), vfta);
574 
575 	/* update local VFTA copy */
576 	shadow_vfta->vfta[vid_idx] = vfta;
577 
578 	return 0;
579 }
580 
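/*
 * Toggling per-queue VLAN strip requires the ring to be quiesced: when the
 * queue is enabled and the strip flag actually changes, the queue is
 * stopped, its base-address/config registers are rewritten, and the queue
 * is restarted.
 */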
581 static void
582 ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
583 {
584 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
585 	struct ngbe_rx_queue *rxq;
586 	bool restart;
587 	uint32_t rxcfg, rxbal, rxbah;
588 
589 	if (on)
590 		ngbe_vlan_hw_strip_enable(dev, queue);
591 	else
592 		ngbe_vlan_hw_strip_disable(dev, queue);
593 
594 	rxq = dev->data->rx_queues[queue];
595 	rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
596 	rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
597 	rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
598 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
599 		restart = (rxcfg & NGBE_RXCFG_ENA) &&
600 			!(rxcfg & NGBE_RXCFG_VLAN);
601 		rxcfg |= NGBE_RXCFG_VLAN;
602 	} else {
603 		restart = (rxcfg & NGBE_RXCFG_ENA) &&
604 			(rxcfg & NGBE_RXCFG_VLAN);
605 		rxcfg &= ~NGBE_RXCFG_VLAN;
606 	}
607 	rxcfg &= ~NGBE_RXCFG_ENA;
608 
609 	if (restart) {
610 		/* set vlan strip for ring */
611 		ngbe_dev_rx_queue_stop(dev, queue);
612 		wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
613 		wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
614 		wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
615 		ngbe_dev_rx_queue_start(dev, queue);
616 	}
617 }
618 
619 static int
620 ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
621 		    enum rte_vlan_type vlan_type,
622 		    uint16_t tpid)
623 {
624 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
625 	int ret = 0;
626 	uint32_t portctrl, vlan_ext, qinq;
627 
628 	portctrl = rd32(hw, NGBE_PORTCTL);
629 
630 	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
631 	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
632 	switch (vlan_type) {
633 	case RTE_ETH_VLAN_TYPE_INNER:
634 		if (vlan_ext) {
635 			wr32m(hw, NGBE_VLANCTL,
636 				NGBE_VLANCTL_TPID_MASK,
637 				NGBE_VLANCTL_TPID(tpid));
638 			wr32m(hw, NGBE_DMATXCTRL,
639 				NGBE_DMATXCTRL_TPID_MASK,
640 				NGBE_DMATXCTRL_TPID(tpid));
641 		} else {
642 			ret = -ENOTSUP;
643 			PMD_DRV_LOG(ERR,
644 				"Inner type is not supported by single VLAN");
645 		}
646 
647 		if (qinq) {
648 			wr32m(hw, NGBE_TAGTPID(0),
649 				NGBE_TAGTPID_LSB_MASK,
650 				NGBE_TAGTPID_LSB(tpid));
651 		}
652 		break;
653 	case RTE_ETH_VLAN_TYPE_OUTER:
654 		if (vlan_ext) {
655 			/* Only the high 16 bits are valid */
656 			wr32m(hw, NGBE_EXTAG,
657 				NGBE_EXTAG_VLAN_MASK,
658 				NGBE_EXTAG_VLAN(tpid));
659 		} else {
660 			wr32m(hw, NGBE_VLANCTL,
661 				NGBE_VLANCTL_TPID_MASK,
662 				NGBE_VLANCTL_TPID(tpid));
663 			wr32m(hw, NGBE_DMATXCTRL,
664 				NGBE_DMATXCTRL_TPID_MASK,
665 				NGBE_DMATXCTRL_TPID(tpid));
666 		}
667 
668 		if (qinq) {
669 			wr32m(hw, NGBE_TAGTPID(0),
670 				NGBE_TAGTPID_MSB_MASK,
671 				NGBE_TAGTPID_MSB(tpid));
672 		}
673 		break;
674 	default:
675 		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
676 		return -EINVAL;
677 	}
678 
679 	return ret;
680 }
681 
682 void
683 ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
684 {
685 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
686 	uint32_t vlnctrl;
687 
688 	PMD_INIT_FUNC_TRACE();
689 
690 	/* Filter Table Disable */
691 	vlnctrl = rd32(hw, NGBE_VLANCTL);
692 	vlnctrl &= ~NGBE_VLANCTL_VFE;
693 	wr32(hw, NGBE_VLANCTL, vlnctrl);
694 }
695 
696 void
697 ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
698 {
699 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
700 	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
701 	uint32_t vlnctrl;
702 	uint16_t i;
703 
704 	PMD_INIT_FUNC_TRACE();
705 
706 	/* Filter Table Enable */
707 	vlnctrl = rd32(hw, NGBE_VLANCTL);
708 	vlnctrl &= ~NGBE_VLANCTL_CFIENA;
709 	vlnctrl |= NGBE_VLANCTL_VFE;
710 	wr32(hw, NGBE_VLANCTL, vlnctrl);
711 
712 	/* write whatever is in local vfta copy */
713 	for (i = 0; i < NGBE_VFTA_SIZE; i++)
714 		wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
715 }
716 
717 void
718 ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
719 {
720 	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
721 	struct ngbe_rx_queue *rxq;
722 
723 	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
724 		return;
725 
726 	if (on)
727 		NGBE_SET_HWSTRIP(hwstrip, queue);
728 	else
729 		NGBE_CLEAR_HWSTRIP(hwstrip, queue);
730 
731 	if (queue >= dev->data->nb_rx_queues)
732 		return;
733 
734 	rxq = dev->data->rx_queues[queue];
735 
736 	if (on) {
737 		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
738 		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
739 	} else {
740 		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
741 		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
742 	}
743 }
744 
745 static void
746 ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
747 {
748 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
749 	uint32_t ctrl;
750 
751 	PMD_INIT_FUNC_TRACE();
752 
753 	ctrl = rd32(hw, NGBE_RXCFG(queue));
754 	ctrl &= ~NGBE_RXCFG_VLAN;
755 	wr32(hw, NGBE_RXCFG(queue), ctrl);
756 
757 	/* record the per-queue HW strip setting */
758 	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
759 }
760 
761 static void
762 ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
763 {
764 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
765 	uint32_t ctrl;
766 
767 	PMD_INIT_FUNC_TRACE();
768 
769 	ctrl = rd32(hw, NGBE_RXCFG(queue));
770 	ctrl |= NGBE_RXCFG_VLAN;
771 	wr32(hw, NGBE_RXCFG(queue), ctrl);
772 
773 	/* record the per-queue HW strip setting */
774 	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
775 }
776 
777 static void
778 ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
779 {
780 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
781 	uint32_t ctrl;
782 
783 	PMD_INIT_FUNC_TRACE();
784 
785 	ctrl = rd32(hw, NGBE_PORTCTL);
786 	ctrl &= ~NGBE_PORTCTL_VLANEXT;
787 	ctrl &= ~NGBE_PORTCTL_QINQ;
788 	wr32(hw, NGBE_PORTCTL, ctrl);
789 }
790 
791 static void
792 ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
793 {
794 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
795 	uint32_t ctrl;
796 
797 	PMD_INIT_FUNC_TRACE();
798 
799 	ctrl  = rd32(hw, NGBE_PORTCTL);
800 	ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
801 	wr32(hw, NGBE_PORTCTL, ctrl);
802 }
803 
804 static void
805 ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
806 {
807 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
808 	uint32_t ctrl;
809 
810 	PMD_INIT_FUNC_TRACE();
811 
812 	ctrl = rd32(hw, NGBE_PORTCTL);
813 	ctrl &= ~NGBE_PORTCTL_QINQ;
814 	wr32(hw, NGBE_PORTCTL, ctrl);
815 }
816 
817 static void
818 ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
819 {
820 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
821 	uint32_t ctrl;
822 
823 	PMD_INIT_FUNC_TRACE();
824 
825 	ctrl  = rd32(hw, NGBE_PORTCTL);
826 	ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
827 	wr32(hw, NGBE_PORTCTL, ctrl);
828 }
829 
830 void
831 ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
832 {
833 	struct ngbe_rx_queue *rxq;
834 	uint16_t i;
835 
836 	PMD_INIT_FUNC_TRACE();
837 
838 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
839 		rxq = dev->data->rx_queues[i];
840 
841 		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
842 			ngbe_vlan_hw_strip_enable(dev, i);
843 		else
844 			ngbe_vlan_hw_strip_disable(dev, i);
845 	}
846 }
847 
848 void
849 ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
850 {
851 	uint16_t i;
852 	struct rte_eth_rxmode *rxmode;
853 	struct ngbe_rx_queue *rxq;
854 
855 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
856 		rxmode = &dev->data->dev_conf.rxmode;
857 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
858 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
859 				rxq = dev->data->rx_queues[i];
860 				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
861 			}
862 		else
863 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
864 				rxq = dev->data->rx_queues[i];
865 				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
866 			}
867 	}
868 }
869 
870 static int
871 ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
872 {
873 	struct rte_eth_rxmode *rxmode;
874 	rxmode = &dev->data->dev_conf.rxmode;
875 
876 	if (mask & RTE_ETH_VLAN_STRIP_MASK)
877 		ngbe_vlan_hw_strip_config(dev);
878 
879 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
880 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
881 			ngbe_vlan_hw_filter_enable(dev);
882 		else
883 			ngbe_vlan_hw_filter_disable(dev);
884 	}
885 
886 	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
887 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
888 			ngbe_vlan_hw_extend_enable(dev);
889 		else
890 			ngbe_vlan_hw_extend_disable(dev);
891 	}
892 
893 	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
894 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
895 			ngbe_qinq_hw_strip_enable(dev);
896 		else
897 			ngbe_qinq_hw_strip_disable(dev);
898 	}
899 
900 	return 0;
901 }
902 
903 static int
904 ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
905 {
906 	ngbe_config_vlan_strip_on_all_queues(dev, mask);
907 
908 	ngbe_vlan_offload_config(dev, mask);
909 
910 	return 0;
911 }
912 
913 static int
914 ngbe_dev_configure(struct rte_eth_dev *dev)
915 {
916 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
917 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
918 
919 	PMD_INIT_FUNC_TRACE();
920 
921 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
922 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
923 
924 	/* set flag to update link status after init */
925 	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
926 
927 	/*
928 	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
929 	 * allocation preconditions, it will be reset.
930 	 */
931 	adapter->rx_bulk_alloc_allowed = true;
932 
933 	return 0;
934 }
935 
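/*
 * Route the PHY link GPIO into the misc interrupt: configure direction,
 * enable and interrupt type, with a polarity that differs for the
 * yt8521s SFI PHY (as selected below), and unmask GPIO/over-heat events.
 */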
936 static void
937 ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
938 {
939 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
940 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
941 
942 	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
943 	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
944 	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
945 	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
946 		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
947 	else
948 		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));
949 
950 	intr->mask_misc |= NGBE_ICRMISC_GPIO | NGBE_ICRMISC_HEAT;
951 }
952 
953 /*
954  * Configure device link speed and setup link.
955  * It returns 0 on success.
956  */
957 static int
958 ngbe_dev_start(struct rte_eth_dev *dev)
959 {
960 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
961 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
962 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
963 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
964 	uint32_t intr_vector = 0;
965 	int err;
966 	bool link_up = false, negotiate = false;
967 	uint32_t speed = 0;
968 	uint32_t allowed_speeds = 0;
969 	int mask = 0;
970 	int status;
971 	uint32_t *link_speeds;
972 
973 	PMD_INIT_FUNC_TRACE();
974 
975 	/* Stop the link setup handler before resetting the HW. */
976 	rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev);
977 
978 	/* disable uio/vfio intr/eventfd mapping */
979 	rte_intr_disable(intr_handle);
980 
981 	/* mark the adapter as no longer stopped */
982 	hw->adapter_stopped = 0;
983 
984 	/* reinitialize adapter, this calls reset and start */
985 	hw->nb_rx_queues = dev->data->nb_rx_queues;
986 	hw->nb_tx_queues = dev->data->nb_tx_queues;
987 	status = ngbe_pf_reset_hw(hw);
988 	if (status != 0)
989 		return -1;
990 	hw->mac.start_hw(hw);
991 	hw->mac.get_link_status = true;
992 
993 	ngbe_set_pcie_master(hw, true);
994 
995 	/* configure PF module if SRIOV enabled */
996 	ngbe_pf_host_configure(dev);
997 
998 	ngbe_dev_phy_intr_setup(dev);
999 
1000 	/* check and configure queue intr-vector mapping */
1001 	if ((rte_intr_cap_multiple(intr_handle) ||
1002 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
1003 	    dev->data->dev_conf.intr_conf.rxq != 0) {
1004 		intr_vector = dev->data->nb_rx_queues;
1005 		if (rte_intr_efd_enable(intr_handle, intr_vector))
1006 			return -1;
1007 	}
1008 
1009 	if (rte_intr_dp_is_en(intr_handle)) {
1010 		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
1011 						   dev->data->nb_rx_queues)) {
1012 			PMD_INIT_LOG(ERR,
1013 				     "Failed to allocate %d rx_queues intr_vec",
1014 				     dev->data->nb_rx_queues);
1015 			return -ENOMEM;
1016 		}
1017 	}
1018 
1019 	/* configure MSI-X for sleep until Rx interrupt */
1020 	ngbe_configure_msix(dev);
1021 
1022 	/* initialize transmission unit */
1023 	ngbe_dev_tx_init(dev);
1024 
1025 	/* This can fail when allocating mbufs for descriptor rings */
1026 	err = ngbe_dev_rx_init(dev);
1027 	if (err != 0) {
1028 		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
1029 		goto error;
1030 	}
1031 
1032 	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
1033 		RTE_ETH_VLAN_EXTEND_MASK;
1034 	err = ngbe_vlan_offload_config(dev, mask);
1035 	if (err != 0) {
1036 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1037 		goto error;
1038 	}
1039 
1040 	hw->mac.setup_pba(hw);
1041 	ngbe_configure_port(dev);
1042 
1043 	err = ngbe_dev_rxtx_start(dev);
1044 	if (err < 0) {
1045 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1046 		goto error;
1047 	}
1048 
1049 	/* Skip link setup if loopback mode is enabled. */
1050 	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
1051 		goto skip_link_setup;
1052 
1053 	err = hw->mac.check_link(hw, &speed, &link_up, 0);
1054 	if (err != 0)
1055 		goto error;
1056 	dev->data->dev_link.link_status = link_up;
1057 
1058 	link_speeds = &dev->data->dev_conf.link_speeds;
1059 	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
1060 		negotiate = true;
1061 
1062 	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
1063 	if (err != 0)
1064 		goto error;
1065 
1066 	allowed_speeds = 0;
1067 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
1068 		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
1069 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
1070 		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
1071 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
1072 		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;
1073 
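	/* Bit 0 of link_speeds is RTE_ETH_LINK_SPEED_FIXED; shift it out so
	 * only the actual speed bits are validated against the allowed set.
	 */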
1074 	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
1075 		PMD_INIT_LOG(ERR, "Invalid link setting");
1076 		goto error;
1077 	}
1078 
1079 	speed = 0x0;
1080 	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
1081 		speed = hw->mac.default_speeds;
1082 	} else {
1083 		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
1084 			speed |= NGBE_LINK_SPEED_1GB_FULL;
1085 		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
1086 			speed |= NGBE_LINK_SPEED_100M_FULL;
1087 		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
1088 			speed |= NGBE_LINK_SPEED_10M_FULL;
1089 	}
1090 
1091 	err = hw->phy.init_hw(hw);
1092 	if (err != 0) {
1093 		PMD_INIT_LOG(ERR, "PHY init failed");
1094 		goto error;
1095 	}
1096 	err = hw->mac.setup_link(hw, speed, link_up);
1097 	if (err != 0)
1098 		goto error;
1099 
1100 skip_link_setup:
1101 
1102 	if (rte_intr_allow_others(intr_handle)) {
1103 		ngbe_dev_misc_interrupt_setup(dev);
1104 		/* check if lsc interrupt is enabled */
1105 		if (dev->data->dev_conf.intr_conf.lsc != 0)
1106 			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
1107 		else
1108 			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
1109 		ngbe_dev_macsec_interrupt_setup(dev);
1110 		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
1111 	} else {
1112 		rte_intr_callback_unregister(intr_handle,
1113 					     ngbe_dev_interrupt_handler, dev);
1114 		if (dev->data->dev_conf.intr_conf.lsc != 0)
1115 			PMD_INIT_LOG(INFO,
1116 				     "LSC won't enable because of no intr multiplex");
1117 	}
1118 
1119 	/* check if rxq interrupt is enabled */
1120 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1121 	    rte_intr_dp_is_en(intr_handle))
1122 		ngbe_dev_rxq_interrupt_setup(dev);
1123 
1124 	/* enable UIO/VFIO intr/eventfd mapping */
1125 	rte_intr_enable(intr_handle);
1126 
1127 	/* resume enabled intr since HW reset */
1128 	ngbe_enable_intr(dev);
1129 
1130 	if (hw->gpio_ctl) {
1131 		/* gpio0 is used for power on/off control */
1132 		wr32(hw, NGBE_GPIODATA, 0);
1133 	}
1134 
1135 	/*
1136 	 * Update link status right before returning, because it may
1137 	 * start the link configuration process in a separate thread.
1138 	 */
1139 	ngbe_dev_link_update(dev, 0);
1140 
1141 	ngbe_read_stats_registers(hw, hw_stats);
1142 	hw->offset_loaded = 1;
1143 
1144 	return 0;
1145 
1146 error:
1147 	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1148 	ngbe_dev_clear_queues(dev);
1149 	return -EIO;
1150 }
1151 
1152 /*
1153  * Stop device: disable rx and tx functions to allow for reconfiguring.
1154  */
1155 static int
1156 ngbe_dev_stop(struct rte_eth_dev *dev)
1157 {
1158 	struct rte_eth_link link;
1159 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
1160 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1161 	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
1162 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1163 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1164 	int vf;
1165 
1166 	if (hw->adapter_stopped)
1167 		return 0;
1168 
1169 	PMD_INIT_FUNC_TRACE();
1170 
1171 	rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev);
1172 
1173 	if (hw->gpio_ctl) {
1174 		/* gpio0 is used to power on/off control*/
1175 		/* gpio0 is used for power on/off control */
1176 	}
1177 
1178 	/* disable interrupts */
1179 	ngbe_disable_intr(hw);
1180 
1181 	/* reset the NIC */
1182 	ngbe_pf_reset_hw(hw);
1183 	hw->adapter_stopped = 0;
1184 
1185 	/* stop adapter */
1186 	ngbe_stop_hw(hw);
1187 
1188 	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
1189 		vfinfo[vf].clear_to_send = false;
1190 
1191 	hw->phy.set_phy_power(hw, false);
1192 
1193 	ngbe_dev_clear_queues(dev);
1194 
1195 	/* Clear stored conf */
1196 	dev->data->scattered_rx = 0;
1197 
1198 	/* Clear recorded link status */
1199 	memset(&link, 0, sizeof(link));
1200 	rte_eth_linkstatus_set(dev, &link);
1201 
1202 	if (!rte_intr_allow_others(intr_handle))
1203 		/* resume to the default handler */
1204 		rte_intr_callback_register(intr_handle,
1205 					   ngbe_dev_interrupt_handler,
1206 					   (void *)dev);
1207 
1208 	/* Clean datapath event and queue/vec mapping */
1209 	rte_intr_efd_disable(intr_handle);
1210 	rte_intr_vec_list_free(intr_handle);
1211 
1212 	ngbe_set_pcie_master(hw, true);
1213 
1214 	adapter->rss_reta_updated = 0;
1215 
1216 	hw->adapter_stopped = true;
1217 	dev->data->dev_started = 0;
1218 
1219 	return 0;
1220 }
1221 
1222 /*
1223  * Set device link up: power on.
1224  */
1225 static int
1226 ngbe_dev_set_link_up(struct rte_eth_dev *dev)
1227 {
1228 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1229 
1230 	hw->phy.set_phy_power(hw, true);
1231 
1232 	return 0;
1233 }
1234 
1235 /*
1236  * Set device link down: power off.
1237  */
1238 static int
1239 ngbe_dev_set_link_down(struct rte_eth_dev *dev)
1240 {
1241 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1242 
1243 	hw->phy.set_phy_power(hw, false);
1244 
1245 	return 0;
1246 }
1247 
1248 /*
1249  * Reset and stop device.
1250  */
1251 static int
1252 ngbe_dev_close(struct rte_eth_dev *dev)
1253 {
1254 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1255 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1256 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1257 	int retries = 0;
1258 	int ret;
1259 
1260 	PMD_INIT_FUNC_TRACE();
1261 
1262 	ngbe_pf_reset_hw(hw);
1263 
1264 	ngbe_dev_stop(dev);
1265 
1266 	ngbe_dev_free_queues(dev);
1267 
1268 	ngbe_set_pcie_master(hw, false);
1269 
1270 	/* reprogram the RAR[0] in case user changed it. */
1271 	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1272 
1273 	/* Unlock any pending hardware semaphore */
1274 	ngbe_swfw_lock_reset(hw);
1275 
1276 	/* disable uio intr before callback unregister */
1277 	rte_intr_disable(intr_handle);
1278 
1279 	do {
1280 		ret = rte_intr_callback_unregister(intr_handle,
1281 				ngbe_dev_interrupt_handler, dev);
1282 		if (ret >= 0 || ret == -ENOENT) {
1283 			break;
1284 		} else if (ret != -EAGAIN) {
1285 			PMD_INIT_LOG(ERR,
1286 				"intr callback unregister failed: %d",
1287 				ret);
1288 		}
1289 		rte_delay_ms(100);
1290 	} while (retries++ < (10 + NGBE_LINK_UP_TIME));
1291 
1292 	/* uninitialize PF if max_vfs not zero */
1293 	ngbe_pf_host_uninit(dev);
1294 
1295 	rte_free(dev->data->mac_addrs);
1296 	dev->data->mac_addrs = NULL;
1297 
1298 	rte_free(dev->data->hash_mac_addrs);
1299 	dev->data->hash_mac_addrs = NULL;
1300 
1301 	return ret;
1302 }
1303 
1304 /*
1305  * Reset PF device.
1306  */
1307 static int
1308 ngbe_dev_reset(struct rte_eth_dev *dev)
1309 {
1310 	int ret;
1311 
1312 	/* When a DPDK PMD PF begins to reset the PF port, it should notify
1313 	 * all its VFs to keep them aligned with it. The detailed notification
1314 	 * mechanism is PMD specific; as for the ngbe PF, it is rather complex.
1315 	 * To avoid unexpected behavior in the VFs, reset of a PF with SR-IOV
1316 	 * active is currently not supported. It might be supported later.
1317 	 */
1318 	if (dev->data->sriov.active)
1319 		return -ENOTSUP;
1320 
1321 	ret = eth_ngbe_dev_uninit(dev);
1322 	if (ret != 0)
1323 		return ret;
1324 
1325 	ret = eth_ngbe_dev_init(dev, NULL);
1326 
1327 	return ret;
1328 }
1329 
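/*
 * The hardware queue counters are 32-bit (36-bit for byte counts, split
 * across two registers) and may wrap between reads. The macros below
 * accumulate the delta since the last read and handle a single wrap,
 * e.g. last = 0xFFFFFFF0 and current = 0x10 yields a delta of 0x20.
 */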
1330 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
1331 	{                                                       \
1332 		uint64_t current_counter = rd32(hw, reg);       \
1333 		if (current_counter < last_counter)             \
1334 			current_counter += 0x100000000LL;       \
1335 		if (!hw->offset_loaded)                         \
1336 			last_counter = current_counter;         \
1337 		counter = current_counter - last_counter;       \
1338 		counter &= 0xFFFFFFFFLL;                        \
1339 	}
1340 
1341 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1342 	{                                                                \
1343 		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
1344 		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
1345 		uint64_t current_counter = (current_counter_msb << 32) | \
1346 			current_counter_lsb;                             \
1347 		if (current_counter < last_counter)                      \
1348 			current_counter += 0x1000000000LL;               \
1349 		if (!hw->offset_loaded)                                  \
1350 			last_counter = current_counter;                  \
1351 		counter = current_counter - last_counter;                \
1352 		counter &= 0xFFFFFFFFFLL;                                \
1353 	}
1354 
1355 void
1356 ngbe_read_stats_registers(struct ngbe_hw *hw,
1357 			   struct ngbe_hw_stats *hw_stats)
1358 {
1359 	unsigned int i;
1360 
1361 	/* QP Stats */
1362 	for (i = 0; i < hw->nb_rx_queues; i++) {
1363 		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
1364 			hw->qp_last[i].rx_qp_packets,
1365 			hw_stats->qp[i].rx_qp_packets);
1366 		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
1367 			hw->qp_last[i].rx_qp_bytes,
1368 			hw_stats->qp[i].rx_qp_bytes);
1369 		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
1370 			hw->qp_last[i].rx_qp_mc_packets,
1371 			hw_stats->qp[i].rx_qp_mc_packets);
1372 		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
1373 			hw->qp_last[i].rx_qp_bc_packets,
1374 			hw_stats->qp[i].rx_qp_bc_packets);
1375 	}
1376 
1377 	for (i = 0; i < hw->nb_tx_queues; i++) {
1378 		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
1379 			hw->qp_last[i].tx_qp_packets,
1380 			hw_stats->qp[i].tx_qp_packets);
1381 		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
1382 			hw->qp_last[i].tx_qp_bytes,
1383 			hw_stats->qp[i].tx_qp_bytes);
1384 		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
1385 			hw->qp_last[i].tx_qp_mc_packets,
1386 			hw_stats->qp[i].tx_qp_mc_packets);
1387 		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
1388 			hw->qp_last[i].tx_qp_bc_packets,
1389 			hw_stats->qp[i].tx_qp_bc_packets);
1390 	}
1391 
1392 	/* PB Stats */
1393 	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
1394 	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
1395 	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
1396 	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
1397 	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
1398 	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);
1399 
1400 	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
1401 	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);
1402 
1403 	/* DMA Stats */
1404 	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
1405 	hw_stats->tx_dma_drop += rd32(hw, NGBE_DMATXDROP);
1406 	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
1407 	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
1408 	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
1409 	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
1410 	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);
1411 
1412 	/* MAC Stats */
1413 	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
1414 	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
1415 	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);
1416 
1417 	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
1418 	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
1419 	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);
1420 
1421 	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
1422 	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);
1423 
1424 	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
1425 	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
1426 	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
1427 	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
1428 	hw_stats->rx_size_512_to_1023_packets +=
1429 			rd64(hw, NGBE_MACRX512TO1023L);
1430 	hw_stats->rx_size_1024_to_max_packets +=
1431 			rd64(hw, NGBE_MACRX1024TOMAXL);
1432 	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
1433 	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
1434 	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
1435 	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
1436 	hw_stats->tx_size_512_to_1023_packets +=
1437 			rd64(hw, NGBE_MACTX512TO1023L);
1438 	hw_stats->tx_size_1024_to_max_packets +=
1439 			rd64(hw, NGBE_MACTX1024TOMAXL);
1440 
1441 	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
1442 	hw_stats->rx_oversize_cnt += rd32(hw, NGBE_MACRXOVERSIZE);
1443 	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);
1444 
1445 	/* MNG Stats */
1446 	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
1447 	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
1448 	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
1449 	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);
1450 
1451 	/* MACsec Stats */
1452 	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
1453 	hw_stats->tx_macsec_pkts_encrypted +=
1454 			rd32(hw, NGBE_LSECTX_ENCPKT);
1455 	hw_stats->tx_macsec_pkts_protected +=
1456 			rd32(hw, NGBE_LSECTX_PROTPKT);
1457 	hw_stats->tx_macsec_octets_encrypted +=
1458 			rd32(hw, NGBE_LSECTX_ENCOCT);
1459 	hw_stats->tx_macsec_octets_protected +=
1460 			rd32(hw, NGBE_LSECTX_PROTOCT);
1461 	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
1462 	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
1463 	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
1464 	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
1465 	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
1466 	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
1467 	hw_stats->rx_macsec_sc_pkts_unchecked +=
1468 			rd32(hw, NGBE_LSECRX_UNCHKPKT);
1469 	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
1470 	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
1471 	for (i = 0; i < 2; i++) {
1472 		hw_stats->rx_macsec_sa_pkts_ok +=
1473 			rd32(hw, NGBE_LSECRX_OKPKT(i));
1474 		hw_stats->rx_macsec_sa_pkts_invalid +=
1475 			rd32(hw, NGBE_LSECRX_INVPKT(i));
1476 		hw_stats->rx_macsec_sa_pkts_notvalid +=
1477 			rd32(hw, NGBE_LSECRX_BADPKT(i));
1478 	}
1479 	for (i = 0; i < 4; i++) {
1480 		hw_stats->rx_macsec_sa_pkts_unusedsa +=
1481 			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
1482 		hw_stats->rx_macsec_sa_pkts_notusingsa +=
1483 			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
1484 	}
1485 	hw_stats->rx_total_missed_packets =
1486 			hw_stats->rx_up_dropped;
1487 }
1488 
1489 static int
1490 ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1491 {
1492 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1493 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1494 	struct ngbe_stat_mappings *stat_mappings =
1495 			NGBE_DEV_STAT_MAPPINGS(dev);
1496 	uint32_t i, j;
1497 
1498 	ngbe_read_stats_registers(hw, hw_stats);
1499 
1500 	if (stats == NULL)
1501 		return -EINVAL;
1502 
1503 	/* Fill out the rte_eth_stats statistics structure */
1504 	stats->ipackets = hw_stats->rx_packets;
1505 	stats->ibytes = hw_stats->rx_bytes;
1506 	stats->opackets = hw_stats->tx_packets;
1507 	stats->obytes = hw_stats->tx_bytes;
1508 
1509 	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
1510 	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
1511 	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
1512 	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
1513 	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
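	/* Each 32-bit [RT]QSM register packs NB_QMAP_FIELDS_PER_QSM_REG
	 * 8-bit fields mapping a queue to a stats counter slot; slots beyond
	 * RTE_ETHDEV_QUEUE_STAT_CNTRS are folded back in with a modulo.
	 */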
1514 	for (i = 0; i < NGBE_MAX_QP; i++) {
1515 		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
1516 		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
1517 		uint32_t q_map;
1518 
1519 		q_map = (stat_mappings->rqsm[n] >> offset)
1520 				& QMAP_FIELD_RESERVED_BITS_MASK;
1521 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1522 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1523 		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1524 		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1525 
1526 		q_map = (stat_mappings->tqsm[n] >> offset)
1527 				& QMAP_FIELD_RESERVED_BITS_MASK;
1528 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1529 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1530 		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1531 		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
1532 	}
1533 
1534 	/* Rx Errors */
1535 	stats->imissed  = hw_stats->rx_total_missed_packets +
1536 			  hw_stats->rx_dma_drop;
1537 	stats->ierrors  = hw_stats->rx_crc_errors +
1538 			  hw_stats->rx_mac_short_packet_dropped +
1539 			  hw_stats->rx_length_errors +
1540 			  hw_stats->rx_undersize_errors +
1541 			  hw_stats->rdb_drp_cnt +
1542 			  hw_stats->rx_illegal_byte_errors +
1543 			  hw_stats->rx_error_bytes +
1544 			  hw_stats->rx_fragment_errors;
1545 
1546 	/* Tx Errors */
1547 	stats->oerrors  = 0;
1548 	return 0;
1549 }
1550 
1551 static int
1552 ngbe_dev_stats_reset(struct rte_eth_dev *dev)
1553 {
1554 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1555 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1556 
1557 	/* HW registers are cleared on read */
1558 	hw->offset_loaded = 0;
1559 	ngbe_dev_stats_get(dev, NULL);
1560 	hw->offset_loaded = 1;
1561 
1562 	/* Reset software totals */
1563 	memset(hw_stats, 0, sizeof(*hw_stats));
1564 
1565 	return 0;
1566 }
1567 
1568 /* This function calculates the number of xstats based on the current config */
1569 static unsigned
1570 ngbe_xstats_calc_num(struct rte_eth_dev *dev)
1571 {
1572 	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1573 	return NGBE_NB_HW_STATS +
1574 	       NGBE_NB_QP_STATS * nb_queues;
1575 }
1576 
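/*
 * xstats ids are laid out with the NGBE_NB_HW_STATS device-wide stats
 * first, then NGBE_NB_QP_STATS entries per queue for up to NGBE_MAX_QP
 * queues. The helpers below decode an id into a display name or into a
 * byte offset within struct ngbe_hw_stats.
 */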
1577 static inline int
1578 ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
1579 {
1580 	int nb, st;
1581 
1582 	/* Extended stats from ngbe_hw_stats */
1583 	if (id < NGBE_NB_HW_STATS) {
1584 		snprintf(name, size, "[hw]%s",
1585 			rte_ngbe_stats_strings[id].name);
1586 		return 0;
1587 	}
1588 	id -= NGBE_NB_HW_STATS;
1589 
1590 	/* Queue Stats */
1591 	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1592 		nb = id / NGBE_NB_QP_STATS;
1593 		st = id % NGBE_NB_QP_STATS;
1594 		snprintf(name, size, "[q%u]%s", nb,
1595 			rte_ngbe_qp_strings[st].name);
1596 		return 0;
1597 	}
1598 	id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;
1599 
1600 	return -(int)(id + 1);
1601 }
1602 
1603 static inline int
1604 ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1605 {
1606 	int nb, st;
1607 
1608 	/* Extended stats from ngbe_hw_stats */
1609 	if (id < NGBE_NB_HW_STATS) {
1610 		*offset = rte_ngbe_stats_strings[id].offset;
1611 		return 0;
1612 	}
1613 	id -= NGBE_NB_HW_STATS;
1614 
1615 	/* Queue Stats */
1616 	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1617 		nb = id / NGBE_NB_QP_STATS;
1618 		st = id % NGBE_NB_QP_STATS;
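		/* This assumes consecutive qp[] entries in ngbe_hw_stats are
		 * laid out exactly NGBE_NB_QP_STATS 64-bit counters apart.
		 */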
1619 		*offset = rte_ngbe_qp_strings[st].offset +
1620 			nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
1621 		return 0;
1622 	}
1623 
1624 	return -1;
1625 }
1626 
1627 static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1628 	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1629 {
1630 	unsigned int i, count;
1631 
1632 	count = ngbe_xstats_calc_num(dev);
1633 	if (xstats_names == NULL)
1634 		return count;
1635 
1636 	/* Note: limit >= cnt_stats checked upstream
1637 	 * in rte_eth_xstats_names()
1638 	 */
1639 	limit = min(limit, count);
1640 
1641 	/* Extended stats from ngbe_hw_stats */
1642 	for (i = 0; i < limit; i++) {
1643 		if (ngbe_get_name_by_id(i, xstats_names[i].name,
1644 			sizeof(xstats_names[i].name))) {
1645 			PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1646 			break;
1647 		}
1648 	}
1649 
1650 	return i;
1651 }
1652 
1653 static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1654 	const uint64_t *ids,
1655 	struct rte_eth_xstat_name *xstats_names,
1656 	unsigned int limit)
1657 {
1658 	unsigned int i;
1659 
1660 	if (ids == NULL)
1661 		return ngbe_dev_xstats_get_names(dev, xstats_names, limit);
1662 
1663 	for (i = 0; i < limit; i++) {
1664 		if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
1665 				sizeof(xstats_names[i].name))) {
1666 			PMD_INIT_LOG(WARNING, "id value %u isn't valid",
1666 				(unsigned int)ids[i]);
1667 			return -1;
1668 		}
1669 	}
1670 
1671 	return i;
1672 }
1673 
1674 static int
1675 ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1676 					 unsigned int limit)
1677 {
1678 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1679 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1680 	unsigned int i, count;
1681 
1682 	ngbe_read_stats_registers(hw, hw_stats);
1683 
1684 	/* If this is a reset, xstats is NULL and we have already cleared
1685 	 * the registers by reading them.
1686 	 */
1687 	count = ngbe_xstats_calc_num(dev);
1688 	if (xstats == NULL)
1689 		return count;
1690 
1691 	limit = min(limit, ngbe_xstats_calc_num(dev));
1692 
1693 	/* Extended stats from ngbe_hw_stats */
1694 	for (i = 0; i < limit; i++) {
1695 		uint32_t offset = 0;
1696 
1697 		if (ngbe_get_offset_by_id(i, &offset)) {
1698 			PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1699 			break;
1700 		}
1701 		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1702 		xstats[i].id = i;
1703 	}
1704 
1705 	return i;
1706 }
1707 
1708 static int
1709 ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1710 					 unsigned int limit)
1711 {
1712 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1713 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1714 	unsigned int i, count;
1715 
1716 	ngbe_read_stats_registers(hw, hw_stats);
1717 
1718 	/* If this is a reset, xstats is NULL and we have already cleared
1719 	 * the registers by reading them.
1720 	 */
1721 	count = ngbe_xstats_calc_num(dev);
1722 	if (values == NULL)
1723 		return count;
1724 
1725 	limit = min(limit, ngbe_xstats_calc_num(dev));
1726 
1727 	/* Extended stats from ngbe_hw_stats */
1728 	for (i = 0; i < limit; i++) {
1729 		uint32_t offset;
1730 
1731 		if (ngbe_get_offset_by_id(i, &offset)) {
1732 			PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1733 			break;
1734 		}
1735 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1736 	}
1737 
1738 	return i;
1739 }
1740 
1741 static int
1742 ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1743 		uint64_t *values, unsigned int limit)
1744 {
1745 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1746 	unsigned int i;
1747 
1748 	if (ids == NULL)
1749 		return ngbe_dev_xstats_get_(dev, values, limit);
1750 
1751 	for (i = 0; i < limit; i++) {
1752 		uint32_t offset;
1753 
1754 		if (ngbe_get_offset_by_id(ids[i], &offset)) {
1755 			PMD_INIT_LOG(WARNING, "id value %u isn't valid",
1755 				(unsigned int)ids[i]);
1756 			break;
1757 		}
1758 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1759 	}
1760 
1761 	return i;
1762 }
1763 
1764 static int
1765 ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
1766 {
1767 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1768 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1769 
1770 	/* HW registers are cleared on read */
1771 	hw->offset_loaded = 0;
1772 	ngbe_read_stats_registers(hw, hw_stats);
1773 	hw->offset_loaded = 1;
1774 
1775 	/* Reset software totals */
1776 	memset(hw_stats, 0, sizeof(*hw_stats));
1777 
1778 	return 0;
1779 }
1780 
1781 static int
1782 ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1783 {
1784 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1785 	int ret;
1786 
1787 	ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
1788 
1789 	if (ret < 0)
1790 		return -EINVAL;
1791 
1792 	ret += 1; /* add the size of '\0' */
1793 	if (fw_size < (size_t)ret)
1794 		return ret;
1795 
1796 	return 0;
1797 }
1798 
1799 static int
1800 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1801 {
1802 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1803 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1804 
1805 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1806 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1807 	dev_info->min_rx_bufsize = 1024;
1808 	dev_info->max_rx_pktlen = 15872;
1809 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1810 	dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
1811 	dev_info->max_vfs = pci_dev->max_vfs;
1812 	dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1813 	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1814 				     dev_info->rx_queue_offload_capa);
1815 	dev_info->tx_queue_offload_capa = 0;
1816 	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1817 
1818 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1819 		.rx_thresh = {
1820 			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
1821 			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
1822 			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
1823 		},
1824 		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1825 		.rx_drop_en = 0,
1826 		.offloads = 0,
1827 	};
1828 
1829 	dev_info->default_txconf = (struct rte_eth_txconf) {
1830 		.tx_thresh = {
1831 			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
1832 			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
1833 			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
1834 		},
1835 		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1836 		.offloads = 0,
1837 	};
1838 
1839 	dev_info->rx_desc_lim = rx_desc_lim;
1840 	dev_info->tx_desc_lim = tx_desc_lim;
1841 
1842 	dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
1843 	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
1844 	dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;
1845 
1846 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1847 				RTE_ETH_LINK_SPEED_10M;
1848 
1849 	/* Driver-preferred Rx/Tx parameters */
1850 	dev_info->default_rxportconf.burst_size = 32;
1851 	dev_info->default_txportconf.burst_size = 32;
1852 	dev_info->default_rxportconf.nb_queues = 1;
1853 	dev_info->default_txportconf.nb_queues = 1;
1854 	dev_info->default_rxportconf.ring_size = 256;
1855 	dev_info->default_txportconf.ring_size = 256;
1856 
1857 	return 0;
1858 }
1859 
1860 const uint32_t *
1861 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1862 {
1863 	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1864 	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1865 	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1866 	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1867 		return ngbe_get_supported_ptypes();
1868 
1869 	return NULL;
1870 }
1871 
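/*
 * Handle an over-temperature event: on overtemp, disable the Rx data path
 * and force the link down; once the sensor reports a return to normal,
 * re-enable the Rx path and bring the link back up.
 */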
1872 static void
1873 ngbe_dev_overheat(struct rte_eth_dev *dev)
1874 {
1875 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1876 	s32 temp_state;
1877 
1878 	temp_state = hw->mac.check_overtemp(hw);
1879 	if (!temp_state)
1880 		return;
1881 
1882 	if (temp_state == NGBE_ERR_UNDERTEMP) {
1883 		PMD_DRV_LOG(CRIT, "Network adapter has been started again "
1884 			"because the temperature has returned to normal.");
1885 		wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, NGBE_PBRXCTL_ENA);
1886 		ngbe_dev_set_link_up(dev);
1887 	} else if (temp_state == NGBE_ERR_OVERTEMP) {
1888 		PMD_DRV_LOG(CRIT, "Network adapter has been stopped because it has overheated.");
1889 		wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0);
1890 		ngbe_dev_set_link_down(dev);
1891 	}
1892 }
1893 
1894 void
1895 ngbe_dev_setup_link_alarm_handler(void *param)
1896 {
1897 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1898 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1899 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1900 	u32 speed;
1901 	bool autoneg = false;
1902 
1903 	speed = hw->phy.autoneg_advertised;
1904 	if (!speed)
1905 		hw->mac.get_link_capabilities(hw, &speed, &autoneg);
1906 
1907 	hw->mac.setup_link(hw, speed, true);
1908 
1909 	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1910 }
1911 
1912 /* return 0 means link status changed, -1 means not changed */
1913 int
1914 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1915 			    int wait_to_complete)
1916 {
1917 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1918 	struct rte_eth_link link;
1919 	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1920 	u32 lan_speed = 0;
1921 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1922 	bool link_up;
1923 	int err;
1924 	int wait = 1;
1925 
1926 	memset(&link, 0, sizeof(link));
1927 	link.link_status = RTE_ETH_LINK_DOWN;
1928 	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1929 	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1930 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1931 			~RTE_ETH_LINK_SPEED_AUTONEG);
1932 
1933 	hw->mac.get_link_status = true;
1934 
1935 	if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
1936 		return rte_eth_linkstatus_set(dev, &link);
1937 
1938 	/* don't wait for completion if not requested, or if the LSC interrupt is enabled */
1939 	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1940 		wait = 0;
1941 
1942 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1943 	if (err != 0) {
1944 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1945 		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1946 		return rte_eth_linkstatus_set(dev, &link);
1947 	}
1948 
1949 	if (!link_up)
1950 		return rte_eth_linkstatus_set(dev, &link);
1951 
1952 	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1953 	link.link_status = RTE_ETH_LINK_UP;
1954 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1955 
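	/*
	 * Map the reported link speed to the ethdev speed value and to the
	 * LAN speed field encoding (0 = 10M, 1 = 100M, 2 = 1G) written below.
	 */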
1956 	switch (link_speed) {
1957 	default:
1958 	case NGBE_LINK_SPEED_UNKNOWN:
1959 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1960 		break;
1961 
1962 	case NGBE_LINK_SPEED_10M_FULL:
1963 		link.link_speed = RTE_ETH_SPEED_NUM_10M;
1964 		lan_speed = 0;
1965 		break;
1966 
1967 	case NGBE_LINK_SPEED_100M_FULL:
1968 		link.link_speed = RTE_ETH_SPEED_NUM_100M;
1969 		lan_speed = 1;
1970 		break;
1971 
1972 	case NGBE_LINK_SPEED_1GB_FULL:
1973 		link.link_speed = RTE_ETH_SPEED_NUM_1G;
1974 		lan_speed = 2;
1975 		break;
1976 	}
1977 
1978 	if (hw->is_pf) {
1979 		wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1980 		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1981 				NGBE_LINK_SPEED_100M_FULL |
1982 				NGBE_LINK_SPEED_10M_FULL)) {
1983 			wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1984 				NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1985 		}
1986 	}
1987 
1988 	return rte_eth_linkstatus_set(dev, &link);
1989 }
1990 
1991 static int
1992 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1993 {
1994 	return ngbe_dev_link_update_share(dev, wait_to_complete);
1995 }
1996 
1997 static int
1998 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1999 {
2000 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2001 	uint32_t fctrl;
2002 
2003 	fctrl = rd32(hw, NGBE_PSRCTL);
2004 	fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
2005 	wr32(hw, NGBE_PSRCTL, fctrl);
2006 
2007 	return 0;
2008 }
2009 
2010 static int
2011 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2012 {
2013 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2014 	uint32_t fctrl;
2015 
2016 	fctrl = rd32(hw, NGBE_PSRCTL);
2017 	fctrl &= (~NGBE_PSRCTL_UCP);
2018 	if (dev->data->all_multicast == 1)
2019 		fctrl |= NGBE_PSRCTL_MCP;
2020 	else
2021 		fctrl &= (~NGBE_PSRCTL_MCP);
2022 	wr32(hw, NGBE_PSRCTL, fctrl);
2023 
2024 	return 0;
2025 }
2026 
2027 static int
2028 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2029 {
2030 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2031 	uint32_t fctrl;
2032 
2033 	fctrl = rd32(hw, NGBE_PSRCTL);
2034 	fctrl |= NGBE_PSRCTL_MCP;
2035 	wr32(hw, NGBE_PSRCTL, fctrl);
2036 
2037 	return 0;
2038 }
2039 
2040 static int
2041 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2042 {
2043 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2044 	uint32_t fctrl;
2045 
2046 	if (dev->data->promiscuous == 1)
2047 		return 0; /* must remain in all_multicast mode */
2048 
2049 	fctrl = rd32(hw, NGBE_PSRCTL);
2050 	fctrl &= (~NGBE_PSRCTL_MCP);
2051 	wr32(hw, NGBE_PSRCTL, fctrl);
2052 
2053 	return 0;
2054 }
2055 
2056 /**
2057  * It clears the interrupt causes and enables the interrupt.
2058  * It is called only once during NIC initialization.
2059  *
2060  * @param dev
2061  *  Pointer to struct rte_eth_dev.
2062  * @param on
2063  *  Enable or Disable.
2064  *
2065  * @return
2066  *  - On success, zero.
2067  *  - On failure, a negative value.
2068  */
2069 static int
2070 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2071 {
2072 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2073 
2074 	ngbe_dev_link_status_print(dev);
2075 	if (on != 0) {
2076 		intr->mask_misc |= NGBE_ICRMISC_PHY;
2077 		intr->mask_misc |= NGBE_ICRMISC_GPIO;
2078 	} else {
2079 		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2080 		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
2081 	}
2082 
2083 	return 0;
2084 }
2085 
2086 /**
2087  * It clears the interrupt causes and enables the interrupt.
2088  * It is called only once during NIC initialization.
2089  *
2090  * @param dev
2091  *  Pointer to struct rte_eth_dev.
2092  *
2093  * @return
2094  *  - On success, zero.
2095  *  - On failure, a negative value.
2096  */
2097 static int
2098 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
2099 {
2100 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2101 	u64 mask;
2102 
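	/* keep only the bit that belongs to the misc interrupt vector */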
2103 	mask = NGBE_ICR_MASK;
2104 	mask &= (1ULL << NGBE_MISC_VEC_ID);
2105 	intr->mask |= mask;
2106 	intr->mask_misc |= NGBE_ICRMISC_GPIO;
2107 
2108 	return 0;
2109 }
2110 
2111 /**
2112  * It clears the interrupt causes and enables the interrupt.
2113  * It is called only once during NIC initialization.
2114  *
2115  * @param dev
2116  *  Pointer to struct rte_eth_dev.
2117  *
2118  * @return
2119  *  - On success, zero.
2120  *  - On failure, a negative value.
2121  */
2122 static int
2123 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2124 {
2125 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2126 	u64 mask;
2127 
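	/* enable all queue vectors by clearing only the bits below the first Rx vector */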
2128 	mask = NGBE_ICR_MASK;
2129 	mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
2130 	intr->mask |= mask;
2131 
2132 	return 0;
2133 }
2134 
2135 /**
2136  * It clears the interrupt causes and enables the interrupt.
2137  * It is called only once during NIC initialization.
2138  *
2139  * @param dev
2140  *  Pointer to struct rte_eth_dev.
2141  *
2142  * @return
2143  *  - On success, zero.
2144  *  - On failure, a negative value.
2145  */
2146 static int
2147 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2148 {
2149 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2150 
2151 	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
2152 
2153 	return 0;
2154 }
2155 
2156 /*
2157  * It reads the ICR and sets the flag for link_update.
2158  *
2159  * @param dev
2160  *  Pointer to struct rte_eth_dev.
2161  *
2162  * @return
2163  *  - On success, zero.
2164  *  - On failure, a negative value.
2165  */
2166 static int
2167 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2168 {
2169 	uint32_t eicr;
2170 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2171 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2172 
2173 	/* read the misc interrupt cause from the in-memory interrupt status block */
2174 	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2175 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2176 
2177 	intr->flags = 0;
2178 
2179 	/* set flag for async link update */
2180 	if (eicr & NGBE_ICRMISC_PHY)
2181 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2182 
2183 	if (eicr & NGBE_ICRMISC_VFMBX)
2184 		intr->flags |= NGBE_FLAG_MAILBOX;
2185 
2186 	if (eicr & NGBE_ICRMISC_LNKSEC)
2187 		intr->flags |= NGBE_FLAG_MACSEC;
2188 
2189 	if (eicr & NGBE_ICRMISC_GPIO)
2190 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2191 
2192 	if (eicr & NGBE_ICRMISC_HEAT)
2193 		intr->flags |= NGBE_FLAG_OVERHEAT;
2194 
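	/* clear the consumed misc status word in the ISB */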
2195 	((u32 *)hw->isb_mem)[NGBE_ISB_MISC] = 0;
2196 
2197 	return 0;
2198 }
2199 
2200 /**
2201  * It gets and then prints the link status.
2202  *
2203  * @param dev
2204  *  Pointer to struct rte_eth_dev.
2205  *
2206  * @return
2207  *  None; this function returns void and only logs
2208  *  the current link status.
2209  */
2210 static void
2211 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
2212 {
2213 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2214 	struct rte_eth_link link;
2215 
2216 	rte_eth_linkstatus_get(dev, &link);
2217 
2218 	if (link.link_status == RTE_ETH_LINK_UP) {
2219 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2220 					(int)(dev->data->port_id),
2221 					(unsigned int)link.link_speed,
2222 			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2223 					"full-duplex" : "half-duplex");
2224 	} else {
2225 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
2226 				(int)(dev->data->port_id));
2227 	}
2228 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2229 				pci_dev->addr.domain,
2230 				pci_dev->addr.bus,
2231 				pci_dev->addr.devid,
2232 				pci_dev->addr.function);
2233 }
2234 
2235 /*
2236  * It executes link_update after an interrupt has occurred.
2237  *
2238  * @param dev
2239  *  Pointer to struct rte_eth_dev.
2240  *
2241  * @return
2242  *  - On success, zero.
2243  *  - On failure, a negative value.
2244  */
2245 static int
2246 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2247 {
2248 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2249 
2250 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2251 
2252 	if (intr->flags & NGBE_FLAG_MAILBOX) {
2253 		ngbe_pf_mbx_process(dev);
2254 		intr->flags &= ~NGBE_FLAG_MAILBOX;
2255 	}
2256 
2257 	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2258 		struct rte_eth_link link;
2259 
2260 		/* get the link status before the update, to detect a speed change later */
2261 		rte_eth_linkstatus_get(dev, &link);
2262 
2263 		ngbe_dev_link_update(dev, 0);
2264 		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2265 		ngbe_dev_link_status_print(dev);
2266 		if (dev->data->dev_link.link_speed != link.link_speed)
2267 			rte_eth_dev_callback_process(dev,
2268 				RTE_ETH_EVENT_INTR_LSC, NULL);
2269 	}
2270 
2271 	if (intr->flags & NGBE_FLAG_OVERHEAT) {
2272 		ngbe_dev_overheat(dev);
2273 		intr->flags &= ~NGBE_FLAG_OVERHEAT;
2274 	}
2275 
2276 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
2277 	ngbe_enable_intr(dev);
2278 
2279 	return 0;
2280 }
2281 
2282 /**
2283  * Interrupt handler triggered by the NIC for handling
2284  * specific interrupts.
2285  *
2286  * @param param
2287  *  The address of parameter (struct rte_eth_dev *) registered before.
2288  */
2289 static void
2290 ngbe_dev_interrupt_handler(void *param)
2291 {
2292 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2293 
2294 	ngbe_dev_interrupt_get_status(dev);
2295 	ngbe_dev_interrupt_action(dev);
2296 }
2297 
2298 static int
2299 ngbe_dev_led_on(struct rte_eth_dev *dev)
2300 {
2301 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2302 	return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP;
2303 }
2304 
2305 static int
2306 ngbe_dev_led_off(struct rte_eth_dev *dev)
2307 {
2308 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2309 	return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP;
2310 }
2311 
2312 static int
2313 ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2314 {
2315 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2316 	uint32_t mflcn_reg;
2317 	uint32_t fccfg_reg;
2318 	int rx_pause;
2319 	int tx_pause;
2320 
2321 	fc_conf->pause_time = hw->fc.pause_time;
2322 	fc_conf->high_water = hw->fc.high_water;
2323 	fc_conf->low_water = hw->fc.low_water;
2324 	fc_conf->send_xon = hw->fc.send_xon;
2325 	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2326 
2327 	/*
2328 	 * Return rx_pause status according to actual setting of
2329 	 * RXFCCFG register.
2330 	 */
2331 	mflcn_reg = rd32(hw, NGBE_RXFCCFG);
2332 	if (mflcn_reg & NGBE_RXFCCFG_FC)
2333 		rx_pause = 1;
2334 	else
2335 		rx_pause = 0;
2336 
2337 	/*
2338 	 * Return tx_pause status according to actual setting of
2339 	 * TXFCCFG register.
2340 	 */
2341 	fccfg_reg = rd32(hw, NGBE_TXFCCFG);
2342 	if (fccfg_reg & NGBE_TXFCCFG_FC)
2343 		tx_pause = 1;
2344 	else
2345 		tx_pause = 0;
2346 
2347 	if (rx_pause && tx_pause)
2348 		fc_conf->mode = RTE_ETH_FC_FULL;
2349 	else if (rx_pause)
2350 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2351 	else if (tx_pause)
2352 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2353 	else
2354 		fc_conf->mode = RTE_ETH_FC_NONE;
2355 
2356 	return 0;
2357 }
2358 
2359 static int
2360 ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2361 {
2362 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2363 	int err;
2364 	uint32_t rx_buf_size;
2365 	uint32_t max_high_water;
2366 	enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = {
2367 		ngbe_fc_none,
2368 		ngbe_fc_rx_pause,
2369 		ngbe_fc_tx_pause,
2370 		ngbe_fc_full
2371 	};
2372 
2373 	PMD_INIT_FUNC_TRACE();
2374 
2375 	rx_buf_size = rd32(hw, NGBE_PBRXSIZE);
2376 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2377 
2378 	/*
2379 	 * Reserve at least one Ethernet frame for the watermark;
2380 	 * high_water/low_water are in kilobytes (hence the >> 10)
2381 	 */
2382 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
2383 	if (fc_conf->high_water > max_high_water ||
2384 	    fc_conf->high_water < fc_conf->low_water) {
2385 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2386 		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
2387 		return -EINVAL;
2388 	}
2389 
2390 	hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode];
2391 	hw->fc.pause_time     = fc_conf->pause_time;
2392 	hw->fc.high_water     = fc_conf->high_water;
2393 	hw->fc.low_water      = fc_conf->low_water;
2394 	hw->fc.send_xon       = fc_conf->send_xon;
2395 	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
2396 
2397 	err = hw->mac.fc_enable(hw);
2398 
2399 	/* Not negotiated is not an error case */
2400 	if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) {
2401 		wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK,
2402 		      (fc_conf->mac_ctrl_frame_fwd
2403 		       ? NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP));
2404 		ngbe_flush(hw);
2405 
2406 		return 0;
2407 	}
2408 
2409 	PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err);
2410 	return -EIO;
2411 }
2412 
2413 int
2414 ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2415 			  struct rte_eth_rss_reta_entry64 *reta_conf,
2416 			  uint16_t reta_size)
2417 {
2418 	uint8_t i, j, mask;
2419 	uint32_t reta;
2420 	uint16_t idx, shift;
2421 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2422 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2423 
2424 	PMD_INIT_FUNC_TRACE();
2425 
2426 	if (!hw->is_pf) {
2427 		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
2428 			"NIC.");
2429 		return -ENOTSUP;
2430 	}
2431 
2432 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2433 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2434 			"(%d) doesn't match the number the hardware supports "
2435 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2436 		return -EINVAL;
2437 	}
2438 
2439 	for (i = 0; i < reta_size; i += 4) {
2440 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2441 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2442 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2443 		if (!mask)
2444 			continue;
2445 
2446 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2447 		for (j = 0; j < 4; j++) {
2448 			if (RS8(mask, j, 0x1)) {
2449 				reta  &= ~(MS32(8 * j, 0xFF));
2450 				reta |= LS32(reta_conf[idx].reta[shift + j],
2451 						8 * j, 0xFF);
2452 			}
2453 		}
2454 		wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2455 	}
2456 	adapter->rss_reta_updated = 1;
2457 
2458 	return 0;
2459 }
2460 
2461 int
2462 ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2463 			 struct rte_eth_rss_reta_entry64 *reta_conf,
2464 			 uint16_t reta_size)
2465 {
2466 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2467 	uint8_t i, j, mask;
2468 	uint32_t reta;
2469 	uint16_t idx, shift;
2470 
2471 	PMD_INIT_FUNC_TRACE();
2472 
2473 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2474 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2475 			"(%d) doesn't match the number the hardware supports "
2476 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2477 		return -EINVAL;
2478 	}
2479 
2480 	for (i = 0; i < reta_size; i += 4) {
2481 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2482 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2483 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2484 		if (!mask)
2485 			continue;
2486 
2487 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2488 		for (j = 0; j < 4; j++) {
2489 			if (RS8(mask, j, 0x1))
2490 				reta_conf[idx].reta[shift + j] =
2491 					(uint16_t)RS32(reta, 8 * j, 0xFF);
2492 		}
2493 	}
2494 
2495 	return 0;
2496 }
2497 
2498 static int
2499 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2500 				uint32_t index, uint32_t pool)
2501 {
2502 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2503 	uint32_t enable_addr = 1;
2504 
2505 	return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2506 			     pool, enable_addr);
2507 }
2508 
2509 static void
2510 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2511 {
2512 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2513 
2514 	ngbe_clear_rar(hw, index);
2515 }
2516 
2517 static int
2518 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2519 {
2520 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2521 
2522 	ngbe_remove_rar(dev, 0);
2523 	ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2524 
2525 	return 0;
2526 }
2527 
2528 static int
2529 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2530 {
2531 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2532 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
2533 	struct rte_eth_dev_data *dev_data = dev->data;
2534 
2535 	/* If device is started, refuse mtu that requires the support of
2536 	 * scattered packets when this feature has not been enabled before.
2537 	 */
2538 	if (dev_data->dev_started && !dev_data->scattered_rx &&
2539 	    (frame_size + 2 * RTE_VLAN_HLEN >
2540 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2541 		PMD_INIT_LOG(ERR, "Stop port first.");
2542 		return -EINVAL;
2543 	}
2544 
2545 	wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2546 		NGBE_FRMSZ_MAX(frame_size));
2547 
2548 	return 0;
2549 }
2550 
2551 static uint32_t
2552 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2553 {
2554 	uint32_t vector = 0;
2555 
2556 	switch (hw->mac.mc_filter_type) {
2557 	case 0:   /* use bits [47:36] of the address */
2558 		vector = ((uc_addr->addr_bytes[4] >> 4) |
2559 			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
2560 		break;
2561 	case 1:   /* use bits [46:35] of the address */
2562 		vector = ((uc_addr->addr_bytes[4] >> 3) |
2563 			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
2564 		break;
2565 	case 2:   /* use bits [45:34] of the address */
2566 		vector = ((uc_addr->addr_bytes[4] >> 2) |
2567 			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
2568 		break;
2569 	case 3:   /* use bits [43:32] of the address */
2570 		vector = ((uc_addr->addr_bytes[4]) |
2571 			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
2572 		break;
2573 	default:  /* Invalid mc_filter_type */
2574 		break;
2575 	}
2576 
2577 	/* the vector is only 12 bits; mask it so the table boundary is not exceeded */
2578 	vector &= 0xFFF;
2579 	return vector;
2580 }
2581 
2582 static int
2583 ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2584 			struct rte_ether_addr *mac_addr, uint8_t on)
2585 {
2586 	uint32_t vector;
2587 	uint32_t uta_idx;
2588 	uint32_t reg_val;
2589 	uint32_t uta_mask;
2590 	uint32_t psrctl;
2591 
2592 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2593 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2594 
2595 	vector = ngbe_uta_vector(hw, mac_addr);
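	/* the 12-bit vector selects one bit in the UTA: the upper seven bits
	 * pick the 32-bit table register, the lower five bits pick the bit
	 */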
2596 	uta_idx = (vector >> 5) & 0x7F;
2597 	uta_mask = 0x1UL << (vector & 0x1F);
2598 
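	/* nothing to do if the bit already matches the requested state */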
2599 	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2600 		return 0;
2601 
2602 	reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2603 	if (on) {
2604 		uta_info->uta_in_use++;
2605 		reg_val |= uta_mask;
2606 		uta_info->uta_shadow[uta_idx] |= uta_mask;
2607 	} else {
2608 		uta_info->uta_in_use--;
2609 		reg_val &= ~uta_mask;
2610 		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2611 	}
2612 
2613 	wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2614 
2615 	psrctl = rd32(hw, NGBE_PSRCTL);
2616 	if (uta_info->uta_in_use > 0)
2617 		psrctl |= NGBE_PSRCTL_UCHFENA;
2618 	else
2619 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2620 
2621 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2622 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2623 	wr32(hw, NGBE_PSRCTL, psrctl);
2624 
2625 	return 0;
2626 }
2627 
2628 static int
2629 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2630 {
2631 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2632 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2633 	uint32_t psrctl;
2634 	int i;
2635 
2636 	if (on) {
2637 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2638 			uta_info->uta_shadow[i] = ~0;
2639 			wr32(hw, NGBE_UCADDRTBL(i), ~0);
2640 		}
2641 	} else {
2642 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2643 			uta_info->uta_shadow[i] = 0;
2644 			wr32(hw, NGBE_UCADDRTBL(i), 0);
2645 		}
2646 	}
2647 
2648 	psrctl = rd32(hw, NGBE_PSRCTL);
2649 	if (on)
2650 		psrctl |= NGBE_PSRCTL_UCHFENA;
2651 	else
2652 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2653 
2654 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2655 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2656 	wr32(hw, NGBE_PSRCTL, psrctl);
2657 
2658 	return 0;
2659 }
2660 
2661 /**
2662  * Set the IVAR registers, mapping interrupt causes to vectors
2663  * @param hw
2664  *  pointer to ngbe_hw struct
2665  * @param direction
2666  *  0 for Rx, 1 for Tx, -1 for other causes
2667  * @param queue
2668  *  queue to map the corresponding interrupt to
2669  * @param msix_vector
2670  *  the vector to map to the corresponding queue
2671  */
2672 void
2673 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2674 		   uint8_t queue, uint8_t msix_vector)
2675 {
2676 	uint32_t tmp, idx;
2677 
2678 	if (direction == -1) {
2679 		/* other causes */
2680 		msix_vector |= NGBE_IVARMISC_VLD;
2681 		idx = 0;
2682 		tmp = rd32(hw, NGBE_IVARMISC);
2683 		tmp &= ~(0xFF << idx);
2684 		tmp |= (msix_vector << idx);
2685 		wr32(hw, NGBE_IVARMISC, tmp);
2686 	} else {
2687 		/* rx or tx causes */
2688 		/* Workaround for ICR lost */
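		/* each 32-bit IVAR register holds four 8-bit entries (Rx and
		 * Tx for two queues); idx is the bit offset of this
		 * queue/direction entry within the register
		 */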
2689 		idx = ((16 * (queue & 1)) + (8 * direction));
2690 		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2691 		tmp &= ~(0xFF << idx);
2692 		tmp |= (msix_vector << idx);
2693 		wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2694 	}
2695 }
2696 
2697 /**
2698  * Sets up the hardware to properly generate MSI-X interrupts
2699  * @param dev
2700  *  pointer to the rte_eth_dev structure
2701  */
2702 static void
2703 ngbe_configure_msix(struct rte_eth_dev *dev)
2704 {
2705 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2706 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2707 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2708 	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2709 	uint32_t vec = NGBE_MISC_VEC_ID;
2710 	uint32_t gpie;
2711 
2712 	/*
2713 	 * Don't configure the MSI-X registers if no mapping is done
2714 	 * between interrupt vectors and event fds.
2715 	 * However, if MSI-X has already been enabled, auto clean,
2716 	 * auto mask and throttling still need to be configured.
2717 	 */
2718 	gpie = rd32(hw, NGBE_GPIE);
2719 	if (!rte_intr_dp_is_en(intr_handle) &&
2720 	    !(gpie & NGBE_GPIE_MSIX))
2721 		return;
2722 
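	/* if non-queue interrupts are allowed, keep the misc vector
	 * separate and start queue vectors at NGBE_RX_VEC_START
	 */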
2723 	if (rte_intr_allow_others(intr_handle)) {
2724 		base = NGBE_RX_VEC_START;
2725 		vec = base;
2726 	}
2727 
2728 	/* setup GPIE for MSI-X mode */
2729 	gpie = rd32(hw, NGBE_GPIE);
2730 	gpie |= NGBE_GPIE_MSIX;
2731 	wr32(hw, NGBE_GPIE, gpie);
2732 
2733 	/* Populate the IVAR table and set the ITR values to the
2734 	 * corresponding register.
2735 	 */
2736 	if (rte_intr_dp_is_en(intr_handle)) {
2737 		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2738 			queue_id++) {
2739 			/* by default, 1:1 mapping */
2740 			ngbe_set_ivar_map(hw, 0, queue_id, vec);
2741 			rte_intr_vec_list_index_set(intr_handle,
2742 							   queue_id, vec);
2743 			if (vec < base + rte_intr_nb_efd_get(intr_handle)
2744 			    - 1)
2745 				vec++;
2746 		}
2747 
2748 		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2749 	}
2750 	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2751 			NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2752 			| NGBE_ITR_WRDSA);
2753 }
2754 
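/*
 * Iterator callback for update_mc_addr_list(): returns the current
 * multicast address and advances the list pointer by one entry.
 * The VMDq pool output is unused here and always set to 0.
 */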
2755 static u8 *
2756 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2757 			u8 **mc_addr_ptr, u32 *vmdq)
2758 {
2759 	u8 *mc_addr;
2760 
2761 	*vmdq = 0;
2762 	mc_addr = *mc_addr_ptr;
2763 	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2764 	return mc_addr;
2765 }
2766 
2767 int
2768 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2769 			  struct rte_ether_addr *mc_addr_set,
2770 			  uint32_t nb_mc_addr)
2771 {
2772 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2773 	u8 *mc_addr_list;
2774 
2775 	mc_addr_list = (u8 *)mc_addr_set;
2776 	return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2777 					 ngbe_dev_addr_list_itr, TRUE);
2778 }
2779 
2780 static uint64_t
2781 ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
2782 {
2783 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2784 	uint64_t systime_cycles;
2785 
2786 	systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL);
2787 	systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32;
2788 
2789 	return systime_cycles;
2790 }
2791 
2792 static uint64_t
2793 ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2794 {
2795 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2796 	uint64_t rx_tstamp_cycles;
2797 
2798 	/* TSRXSTMPL and TSRXSTMPH hold the low and high 32 bits of the Rx timestamp */
2799 	rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL);
2800 	rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32;
2801 
2802 	return rx_tstamp_cycles;
2803 }
2804 
2805 static uint64_t
2806 ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2807 {
2808 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2809 	uint64_t tx_tstamp_cycles;
2810 
2811 	/* TSTXSTMPL and TSTXSTMPH hold the low and high 32 bits of the Tx timestamp */
2812 	tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL);
2813 	tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32;
2814 
2815 	return tx_tstamp_cycles;
2816 }
2817 
2818 static void
2819 ngbe_start_timecounters(struct rte_eth_dev *dev)
2820 {
2821 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2822 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2823 	uint32_t incval = 0;
2824 	uint32_t shift = 0;
2825 
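	/* ngbe is a 1G device: a fixed increment value and shift are used
	 * for the cycle counter regardless of the negotiated link speed
	 */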
2826 	incval = NGBE_INCVAL_1GB;
2827 	shift = NGBE_INCVAL_SHIFT_1GB;
2828 
2829 	wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval));
2830 
2831 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
2832 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2833 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2834 
2835 	adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2836 	adapter->systime_tc.cc_shift = shift;
2837 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
2838 
2839 	adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2840 	adapter->rx_tstamp_tc.cc_shift = shift;
2841 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2842 
2843 	adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2844 	adapter->tx_tstamp_tc.cc_shift = shift;
2845 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2846 }
2847 
2848 static int
2849 ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
2850 {
2851 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2852 
2853 	adapter->systime_tc.nsec += delta;
2854 	adapter->rx_tstamp_tc.nsec += delta;
2855 	adapter->tx_tstamp_tc.nsec += delta;
2856 
2857 	return 0;
2858 }
2859 
2860 static int
2861 ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
2862 {
2863 	uint64_t ns;
2864 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2865 
2866 	ns = rte_timespec_to_ns(ts);
2867 	/* Set the timecounters to a new value. */
2868 	adapter->systime_tc.nsec = ns;
2869 	adapter->rx_tstamp_tc.nsec = ns;
2870 	adapter->tx_tstamp_tc.nsec = ns;
2871 
2872 	return 0;
2873 }
2874 
2875 static int
2876 ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
2877 {
2878 	uint64_t ns, systime_cycles;
2879 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2880 
2881 	systime_cycles = ngbe_read_systime_cyclecounter(dev);
2882 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
2883 	*ts = rte_ns_to_timespec(ns);
2884 
2885 	return 0;
2886 }
2887 
2888 static int
2889 ngbe_timesync_enable(struct rte_eth_dev *dev)
2890 {
2891 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2892 	uint32_t tsync_ctl;
2893 
2894 	/* Stop the timesync system time. */
2895 	wr32(hw, NGBE_TSTIMEINC, 0x0);
2896 	/* Reset the timesync system time value. */
2897 	wr32(hw, NGBE_TSTIMEL, 0x0);
2898 	wr32(hw, NGBE_TSTIMEH, 0x0);
2899 
2900 	ngbe_start_timecounters(dev);
2901 
2902 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2903 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588),
2904 		RTE_ETHER_TYPE_1588 | NGBE_ETFLT_ENA | NGBE_ETFLT_1588);
2905 
2906 	/* Enable timestamping of received PTP packets. */
2907 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2908 	tsync_ctl |= NGBE_TSRXCTL_ENA;
2909 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2910 
2911 	/* Enable timestamping of transmitted PTP packets. */
2912 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2913 	tsync_ctl |= NGBE_TSTXCTL_ENA;
2914 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2915 
2916 	ngbe_flush(hw);
2917 
2918 	return 0;
2919 }
2920 
2921 static int
2922 ngbe_timesync_disable(struct rte_eth_dev *dev)
2923 {
2924 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2925 	uint32_t tsync_ctl;
2926 
2927 	/* Disable timestamping of transmitted PTP packets. */
2928 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2929 	tsync_ctl &= ~NGBE_TSTXCTL_ENA;
2930 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2931 
2932 	/* Disable timestamping of received PTP packets. */
2933 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2934 	tsync_ctl &= ~NGBE_TSRXCTL_ENA;
2935 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2936 
2937 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2938 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);
2939 
2940 	/* Stop incrementing the System Time registers. */
2941 	wr32(hw, NGBE_TSTIMEINC, 0);
2942 
2943 	return 0;
2944 }
2945 
2946 static int
2947 ngbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
2948 				 struct timespec *timestamp,
2949 				 uint32_t flags __rte_unused)
2950 {
2951 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2952 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2953 	uint32_t tsync_rxctl;
2954 	uint64_t rx_tstamp_cycles;
2955 	uint64_t ns;
2956 
2957 	tsync_rxctl = rd32(hw, NGBE_TSRXCTL);
2958 	if ((tsync_rxctl & NGBE_TSRXCTL_VLD) == 0)
2959 		return -EINVAL;
2960 
2961 	rx_tstamp_cycles = ngbe_read_rx_tstamp_cyclecounter(dev);
2962 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
2963 	*timestamp = rte_ns_to_timespec(ns);
2964 
2965 	return  0;
2966 }
2967 
2968 static int
2969 ngbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
2970 				 struct timespec *timestamp)
2971 {
2972 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2973 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2974 	uint32_t tsync_txctl;
2975 	uint64_t tx_tstamp_cycles;
2976 	uint64_t ns;
2977 
2978 	tsync_txctl = rd32(hw, NGBE_TSTXCTL);
2979 	if ((tsync_txctl & NGBE_TSTXCTL_VLD) == 0)
2980 		return -EINVAL;
2981 
2982 	tx_tstamp_cycles = ngbe_read_tx_tstamp_cyclecounter(dev);
2983 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
2984 	*timestamp = rte_ns_to_timespec(ns);
2985 
2986 	return 0;
2987 }
2988 
2989 static int
2990 ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
2991 {
2992 	int count = 0;
2993 	int g_ind = 0;
2994 	const struct reg_info *reg_group;
2995 	const struct reg_info **reg_set = ngbe_regs_others;
2996 
2997 	while ((reg_group = reg_set[g_ind++]))
2998 		count += ngbe_regs_group_count(reg_group);
2999 
3000 	return count;
3001 }
3002 
3003 static int
3004 ngbe_get_regs(struct rte_eth_dev *dev,
3005 	      struct rte_dev_reg_info *regs)
3006 {
3007 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3008 	uint32_t *data = regs->data;
3009 	int g_ind = 0;
3010 	int count = 0;
3011 	const struct reg_info *reg_group;
3012 	const struct reg_info **reg_set = ngbe_regs_others;
3013 
3014 	if (data == NULL) {
3015 		regs->length = ngbe_get_reg_length(dev);
3016 		regs->width = sizeof(uint32_t);
3017 		return 0;
3018 	}
3019 
3020 	/* Support only full register dump */
3021 	if (regs->length == 0 ||
3022 	    regs->length == (uint32_t)ngbe_get_reg_length(dev)) {
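		/* version encodes MAC type [31:24], revision [23:16] and device ID [15:0] */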
3023 		regs->version = hw->mac.type << 24 |
3024 				hw->revision_id << 16 |
3025 				hw->device_id;
3026 		while ((reg_group = reg_set[g_ind++]))
3027 			count += ngbe_read_regs_group(dev, &data[count],
3028 						      reg_group);
3029 		return 0;
3030 	}
3031 
3032 	return -ENOTSUP;
3033 }
3034 
3035 static int
3036 ngbe_get_eeprom_length(struct rte_eth_dev *dev)
3037 {
3038 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3039 
3040 	/* the returned length is in bytes (two bytes per EEPROM word) */
3041 	return hw->rom.word_size * 2;
3042 }
3043 
3044 static int
3045 ngbe_get_eeprom(struct rte_eth_dev *dev,
3046 		struct rte_dev_eeprom_info *in_eeprom)
3047 {
3048 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3049 	struct ngbe_rom_info *eeprom = &hw->rom;
3050 	uint16_t *data = in_eeprom->data;
3051 	int first, length;
3052 
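	/* the EEPROM is word-addressed: convert byte offset and length to words */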
3053 	first = in_eeprom->offset >> 1;
3054 	length = in_eeprom->length >> 1;
3055 	if (first > hw->rom.word_size ||
3056 	    ((first + length) > hw->rom.word_size))
3057 		return -EINVAL;
3058 
3059 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3060 
3061 	return eeprom->readw_buffer(hw, first, length, data);
3062 }
3063 
3064 static int
3065 ngbe_set_eeprom(struct rte_eth_dev *dev,
3066 		struct rte_dev_eeprom_info *in_eeprom)
3067 {
3068 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3069 	struct ngbe_rom_info *eeprom = &hw->rom;
3070 	uint16_t *data = in_eeprom->data;
3071 	int first, length;
3072 
3073 	first = in_eeprom->offset >> 1;
3074 	length = in_eeprom->length >> 1;
3075 	if (first > hw->rom.word_size ||
3076 	    ((first + length) > hw->rom.word_size))
3077 		return -EINVAL;
3078 
3079 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3080 
3081 	return eeprom->writew_buffer(hw, first, length, data);
3082 }
3083 
3084 static const struct eth_dev_ops ngbe_eth_dev_ops = {
3085 	.dev_configure              = ngbe_dev_configure,
3086 	.dev_infos_get              = ngbe_dev_info_get,
3087 	.dev_start                  = ngbe_dev_start,
3088 	.dev_stop                   = ngbe_dev_stop,
3089 	.dev_set_link_up            = ngbe_dev_set_link_up,
3090 	.dev_set_link_down          = ngbe_dev_set_link_down,
3091 	.dev_close                  = ngbe_dev_close,
3092 	.dev_reset                  = ngbe_dev_reset,
3093 	.promiscuous_enable         = ngbe_dev_promiscuous_enable,
3094 	.promiscuous_disable        = ngbe_dev_promiscuous_disable,
3095 	.allmulticast_enable        = ngbe_dev_allmulticast_enable,
3096 	.allmulticast_disable       = ngbe_dev_allmulticast_disable,
3097 	.link_update                = ngbe_dev_link_update,
3098 	.stats_get                  = ngbe_dev_stats_get,
3099 	.xstats_get                 = ngbe_dev_xstats_get,
3100 	.xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
3101 	.stats_reset                = ngbe_dev_stats_reset,
3102 	.xstats_reset               = ngbe_dev_xstats_reset,
3103 	.xstats_get_names           = ngbe_dev_xstats_get_names,
3104 	.xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
3105 	.fw_version_get             = ngbe_fw_version_get,
3106 	.dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
3107 	.mtu_set                    = ngbe_dev_mtu_set,
3108 	.vlan_filter_set            = ngbe_vlan_filter_set,
3109 	.vlan_tpid_set              = ngbe_vlan_tpid_set,
3110 	.vlan_offload_set           = ngbe_vlan_offload_set,
3111 	.vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
3112 	.rx_queue_start	            = ngbe_dev_rx_queue_start,
3113 	.rx_queue_stop              = ngbe_dev_rx_queue_stop,
3114 	.tx_queue_start	            = ngbe_dev_tx_queue_start,
3115 	.tx_queue_stop              = ngbe_dev_tx_queue_stop,
3116 	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
3117 	.rx_queue_release           = ngbe_dev_rx_queue_release,
3118 	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
3119 	.tx_queue_release           = ngbe_dev_tx_queue_release,
3120 	.dev_led_on                 = ngbe_dev_led_on,
3121 	.dev_led_off                = ngbe_dev_led_off,
3122 	.flow_ctrl_get              = ngbe_flow_ctrl_get,
3123 	.flow_ctrl_set              = ngbe_flow_ctrl_set,
3124 	.mac_addr_add               = ngbe_add_rar,
3125 	.mac_addr_remove            = ngbe_remove_rar,
3126 	.mac_addr_set               = ngbe_set_default_mac_addr,
3127 	.uc_hash_table_set          = ngbe_uc_hash_table_set,
3128 	.uc_all_hash_table_set      = ngbe_uc_all_hash_table_set,
3129 	.reta_update                = ngbe_dev_rss_reta_update,
3130 	.reta_query                 = ngbe_dev_rss_reta_query,
3131 	.rss_hash_update            = ngbe_dev_rss_hash_update,
3132 	.rss_hash_conf_get          = ngbe_dev_rss_hash_conf_get,
3133 	.set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
3134 	.rxq_info_get               = ngbe_rxq_info_get,
3135 	.txq_info_get               = ngbe_txq_info_get,
3136 	.rx_burst_mode_get          = ngbe_rx_burst_mode_get,
3137 	.tx_burst_mode_get          = ngbe_tx_burst_mode_get,
3138 	.timesync_enable            = ngbe_timesync_enable,
3139 	.timesync_disable           = ngbe_timesync_disable,
3140 	.timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp,
3141 	.timesync_read_tx_timestamp = ngbe_timesync_read_tx_timestamp,
3142 	.get_reg                    = ngbe_get_regs,
3143 	.get_eeprom_length          = ngbe_get_eeprom_length,
3144 	.get_eeprom                 = ngbe_get_eeprom,
3145 	.set_eeprom                 = ngbe_set_eeprom,
3146 	.timesync_adjust_time       = ngbe_timesync_adjust_time,
3147 	.timesync_read_time         = ngbe_timesync_read_time,
3148 	.timesync_write_time        = ngbe_timesync_write_time,
3149 	.tx_done_cleanup            = ngbe_dev_tx_done_cleanup,
3150 };
3151 
3152 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
3153 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
3154 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
3155 
3156 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
3157 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
3158 
3159 #ifdef RTE_ETHDEV_DEBUG_RX
3160 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
3161 #endif
3162 #ifdef RTE_ETHDEV_DEBUG_TX
3163 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
3164 #endif
3165