/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"
#include "ngbe_regs_group.h"

static const struct reg_info ngbe_regs_general[] = {
	{NGBE_RST, 1, 1, "NGBE_RST"},
	{NGBE_STAT, 1, 1, "NGBE_STAT"},
	{NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
	{NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
	{NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
	{NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_nvm[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_interrupt[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_fctl_others[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rxdma[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_tx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_wakeup[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_mac[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_diagnostic[] = {
	{0, 0, 0, ""},
};

/* PF registers */
static const struct reg_info *ngbe_regs_others[] = {
				ngbe_regs_general,
				ngbe_regs_nvm,
				ngbe_regs_interrupt,
				ngbe_regs_fctl_others,
				ngbe_regs_rxdma,
				ngbe_regs_rx,
				ngbe_regs_tx,
				ngbe_regs_wakeup,
				ngbe_regs_mac,
				ngbe_regs_diagnostic,
				NULL};

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
					uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_dev_interrupt_delayed_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);

#define NGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)

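/*
 * Worked example (illustrative, not from the original source; assumes the
 * bitmap words are 32-bit): with NBBY == 8, each word holds 32 queue bits,
 * so queue 37 maps to idx = 37 / 32 = 1 and bit = 37 % 32 = 5.
 * NGBE_SET_HWSTRIP(h, 37) therefore sets bit 5 of h->bitmap[1], and
 * NGBE_GET_HWSTRIP(h, 37, r) reads that same bit back into r.
 */
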
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_TXD_ALIGN,
	.nb_seg_max = NGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
	/* MNG RxTx */
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),
	/* Basic RxTx */
	HW_XSTAT(rx_packets),
	HW_XSTAT(tx_packets),
	HW_XSTAT(rx_bytes),
	HW_XSTAT(tx_bytes),
	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),

	/* Basic Error */
	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_errors),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	/* MACSEC */
	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	/* MAC RxTx */
	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	/* Flow Control */
	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
			   sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
			   sizeof(rte_ngbe_qp_strings[0]))

static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	if (status == NGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}

static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	wr32(hw, NGBE_IENMISC, intr->mask_misc);
	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
	ngbe_flush(hw);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
	ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These locks are trickier since they are common to all ports; but
	 * the swfw_sync retries last long enough (1s) to make it almost
	 * certain that a failure to take the lock is due to an improper
	 * hold of the semaphore.
	 */
	mask = NGBE_MNGSEM_SWPHY |
	       NGBE_MNGSEM_SWMBX |
	       NGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	int err, ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ngbe_eth_dev_ops;
	eth_dev->rx_queue_count       = ngbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further because
	 * the primary process has already done this work. We only need to
	 * check that the Rx and Tx functions don't need to be different.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ngbe_tx_queue *txq;
		/* The Tx queue function in the primary process was set by the
		 * last initialized queue; the Tx queues may not have been
		 * initialized by the primary process yet.
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			ngbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default Tx function if we get here */
			PMD_INIT_LOG(NOTICE,
				"No Tx queues configured yet. Using default Tx function.");
		}

		ngbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->sub_system_id = pci_dev->id.subsystem_device_id;
	ngbe_map_device_id(hw);
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = ngbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ngbe_fc_full;
	hw->fc.current_mode = ngbe_fc_full;
	hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
	hw->fc.low_water = NGBE_FC_XON_LOTH;
	hw->fc.high_water = NGBE_FC_XOFF_HITH;
	hw->fc.send_xon = 1;

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, NULL);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	ngbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ngbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs is not zero */
	ret = ngbe_pf_host_init(eth_dev);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		return ret;
	}

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			(int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ngbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable supported interrupts */
	ngbe_enable_intr(eth_dev);

	return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_dev_close(eth_dev);

	return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct ngbe_adapter),
			eth_dev_pci_specific_init, pci_dev,
			eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (ethdev == NULL)
		return 0;

	return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
	.id_table = pci_id_ngbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ngbe_pci_probe,
	.remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

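	/*
	 * Illustrative note (not from the original source): the 4096-entry
	 * VLAN filter table is spread across 128 32-bit registers, so bits
	 * 11:5 of the VLAN ID select the table word (vid_idx) and bits 4:0
	 * select the bit inside it (vid_bit); e.g. VLAN 100 lands in word 3,
	 * bit 4.
	 */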
	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
	vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	if (on)
		ngbe_vlan_hw_strip_enable(dev, queue);
	else
		ngbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			!(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg |= NGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg &= ~NGBE_RXCFG_VLAN;
	}
	rxcfg &= ~NGBE_RXCFG_ENA;

	if (restart) {
		/* set vlan strip for ring */
		ngbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
		ngbe_dev_rx_queue_start(dev, queue);
	}
}

static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, NGBE_PORTCTL);

	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR,
				"Inner type is not supported by single VLAN");
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_LSB_MASK,
				NGBE_TAGTPID_LSB(tpid));
		}
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16 bits are valid */
			wr32m(hw, NGBE_EXTAG,
				NGBE_EXTAG_VLAN_MASK,
				NGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_MSB_MASK,
				NGBE_TAGTPID_MSB(tpid));
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}

void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);
}

void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_CFIENA;
	vlnctrl |= NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < NGBE_VFTA_SIZE; i++)
		wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
	struct ngbe_rx_queue *rxq;

	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		NGBE_SET_HWSTRIP(hwstrip, queue);
	else
		NGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl &= ~NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record this setting in the per-queue HW strip bitmap */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl |= NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record this setting in the per-queue HW strip bitmap */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_VLANEXT;
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl  = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl  = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct ngbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			ngbe_vlan_hw_strip_enable(dev, i);
		else
			ngbe_vlan_hw_strip_disable(dev, i);
	}
}

void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct ngbe_rx_queue *rxq;

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & RTE_ETH_VLAN_STRIP_MASK)
		ngbe_vlan_hw_strip_config(dev);

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			ngbe_vlan_hw_filter_enable(dev);
		else
			ngbe_vlan_hw_filter_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
			ngbe_vlan_hw_extend_enable(dev);
		else
			ngbe_vlan_hw_extend_disable(dev);
	}

	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
			ngbe_qinq_hw_strip_enable(dev);
		else
			ngbe_qinq_hw_strip_disable(dev);
	}

	return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	ngbe_config_vlan_strip_on_all_queues(dev, mask);

	ngbe_vlan_offload_config(dev, mask);

	return 0;
}

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	/* set flag to update link status after init */
	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
	 * allocation preconditions, it will be reset.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
	else
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

	intr->mask_misc |= NGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = false;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;
	ngbe_stop_hw(hw);

	/* reinitialize adapter; this calls reset and start */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = ngbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	/* configure PF module if SRIOV enabled */
	ngbe_pf_host_configure(dev);

	ngbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
						   dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate %d rx_queues intr_vec",
				     dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until Rx interrupt */
	ngbe_configure_msix(dev);

	/* initialize transmission unit */
	ngbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ngbe_dev_rx_init(dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
		goto error;
	}

	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
		RTE_ETH_VLAN_EXTEND_MASK;
	err = ngbe_vlan_offload_config(dev, mask);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	ngbe_configure_port(dev);

	err = ngbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err != 0)
		goto error;
	dev->data->dev_link.link_status = link_up;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		negotiate = true;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err != 0)
		goto error;

	allowed_speeds = 0;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = hw->mac.default_speeds;
	} else {
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= NGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= NGBE_LINK_SPEED_100M_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
			speed |= NGBE_LINK_SPEED_10M_FULL;
	}

	hw->phy.init_hw(hw);
	err = hw->mac.setup_link(hw, speed, link_up);
	if (err != 0)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		ngbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
		ngbe_dev_macsec_interrupt_setup(dev);
		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     ngbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO,
				     "LSC won't enable because of no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		ngbe_dev_rxq_interrupt_setup(dev);

	/* enable UIO/VFIO intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since HW reset */
	ngbe_enable_intr(dev);

	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
		(hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* gpio0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, 0);
	}

	/*
	 * Update link status right before returning, because it may start
	 * the link configuration process in a separate thread.
	 */
	ngbe_dev_link_update(dev, 0);

	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	ngbe_dev_clear_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int vf;

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
		(hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* gpio0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
	}

	/* disable interrupts */
	ngbe_disable_intr(hw);

	/* reset the NIC */
	ngbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	ngbe_stop_hw(hw);

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	ngbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ngbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	adapter->rss_reta_updated = 0;

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ngbe_pf_reset_hw(hw);

	ngbe_dev_stop(dev);

	ngbe_dev_free_queues(dev);

	/* reprogram the RAR[0] in case user changed it. */
	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
				ngbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + NGBE_LINK_UP_TIME));

	/* uninitialize PF if max_vfs not zero */
	ngbe_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begins to reset a PF port, it should notify all
	 * of its VFs so that they align with it. The detailed notification
	 * mechanism is PMD-specific. As for the ngbe PF, it is rather complex.
	 * To avoid unexpected behavior in the VFs, reset of a PF with SR-IOV
	 * activated is currently not supported. It might be supported later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_ngbe_dev_uninit(dev);
	if (ret != 0)
		return ret;

	ret = eth_ngbe_dev_init(dev, NULL);

	return ret;
}

#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
	{                                                       \
		uint32_t current_counter = rd32(hw, reg);       \
		if (current_counter < last_counter)             \
			current_counter += 0x100000000LL;       \
		if (!hw->offset_loaded)                         \
			last_counter = current_counter;         \
		counter = current_counter - last_counter;       \
		counter &= 0xFFFFFFFFLL;                        \
	}

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{                                                                \
		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
		uint64_t current_counter = (current_counter_msb << 32) | \
			current_counter_lsb;                             \
		if (current_counter < last_counter)                      \
			current_counter += 0x1000000000LL;               \
		if (!hw->offset_loaded)                                  \
			last_counter = current_counter;                  \
		counter = current_counter - last_counter;                \
		counter &= 0xFFFFFFFFFLL;                                \
	}

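/*
 * Illustrative note (not from the original source): the subtraction above
 * handles free-running hardware counters that wrap. For the 36-bit case,
 * if last_counter == 0xFFFFFFFF0 and the registers now read 0x000000010,
 * current_counter is bumped by 2^36 before subtracting, so
 * counter = 0x1000000010 - 0xFFFFFFFF0 = 0x20, i.e. the 32 events that
 * occurred across the wrap.
 */
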
void
ngbe_read_stats_registers(struct ngbe_hw *hw,
			   struct ngbe_hw_stats *hw_stats)
{
	unsigned int i;

	/* QP Stats */
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
			hw->qp_last[i].rx_qp_packets,
			hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
			hw->qp_last[i].rx_qp_bytes,
			hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
			hw->qp_last[i].rx_qp_mc_packets,
			hw_stats->qp[i].rx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
			hw->qp_last[i].rx_qp_bc_packets,
			hw_stats->qp[i].rx_qp_bc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
			hw->qp_last[i].tx_qp_packets,
			hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
			hw->qp_last[i].tx_qp_bytes,
			hw_stats->qp[i].tx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
			hw->qp_last[i].tx_qp_mc_packets,
			hw_stats->qp[i].tx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
			hw->qp_last[i].tx_qp_bc_packets,
			hw_stats->qp[i].tx_qp_bc_packets);
	}

	/* PB Stats */
	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

	/* DMA Stats */
	hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

	/* MAC Stats */
	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
	hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

	/* MNG Stats */
	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);

	/* MACsec Stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, NGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, NGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, NGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, NGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, NGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, NGBE_LSECRX_BADPKT(i));
	}
	for (i = 0; i < 4; i++) {
		hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
		hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
	}
	hw_stats->rx_total_missed_packets =
			hw_stats->rx_up_dropped;
}

static int
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct ngbe_stat_mappings *stat_mappings =
			NGBE_DEV_STAT_MAPPINGS(dev);
	uint32_t i, j;

	ngbe_read_stats_registers(hw, hw_stats);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
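	/*
	 * Illustrative note (not from the original source): each RQSM/TQSM
	 * register packs NB_QMAP_FIELDS_PER_QSM_REG 8-bit queue-to-counter
	 * mappings, and queues mapped beyond RTE_ETHDEV_QUEUE_STAT_CNTRS are
	 * folded back into the available counters with the modulo below.
	 */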
	for (i = 0; i < NGBE_MAX_QP; i++) {
		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
		uint32_t q_map;

		q_map = (stat_mappings->rqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

		q_map = (stat_mappings->tqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
	}

	/* Rx Errors */
	stats->imissed  = hw_stats->rx_total_missed_packets +
			  hw_stats->rx_dma_drop;
	stats->ierrors  = hw_stats->rx_crc_errors +
			  hw_stats->rx_mac_short_packet_dropped +
			  hw_stats->rx_length_errors +
			  hw_stats->rx_undersize_errors +
			  hw_stats->rx_oversize_errors +
			  hw_stats->rx_illegal_byte_errors +
			  hw_stats->rx_error_bytes +
			  hw_stats->rx_fragment_errors;

	/* Tx Errors */
	stats->oerrors  = 0;
	return 0;
}

static int
ngbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_dev_stats_get(dev, NULL);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

/* This function calculates the number of xstats based on the current config */
static unsigned
ngbe_xstats_calc_num(struct rte_eth_dev *dev)
{
	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	return NGBE_NB_HW_STATS +
	       NGBE_NB_QP_STATS * nb_queues;
}

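/*
 * Illustrative example (not from the original source): with 4 Rx queues
 * and 2 Tx queues configured, max() selects 4, so the device exposes
 * NGBE_NB_HW_STATS + 4 * NGBE_NB_QP_STATS extended statistics.
 */
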
static inline int
ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		snprintf(name, size, "[hw]%s",
			rte_ngbe_stats_strings[id].name);
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		snprintf(name, size, "[q%u]%s", nb,
			rte_ngbe_qp_strings[st].name);
		return 0;
	}
	id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;

	return -(int)(id + 1);
}

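/*
 * Illustrative id layout (not from the original source): ids in
 * [0, NGBE_NB_HW_STATS) name the hardware stats, then each queue owns the
 * next NGBE_NB_QP_STATS ids in order. With NGBE_NB_QP_STATS == 5, the id
 * NGBE_NB_HW_STATS + 7 resolves to nb = 1, st = 2, i.e. "[q1]rx_qp_bytes".
 */
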
static inline int
ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		*offset = rte_ngbe_stats_strings[id].offset;
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		*offset = rte_ngbe_qp_strings[st].offset +
			nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
		return 0;
	}

	return -1;
}

static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
{
	unsigned int i, count;

	count = ngbe_xstats_calc_num(dev);
	if (xstats_names == NULL)
		return count;

	/* Note: limit >= cnt_stats checked upstream
	 * in rte_eth_xstats_names()
	 */
	limit = min(limit, count);

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(i, xstats_names[i].name,
			sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
	}

	return i;
}

static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
	const uint64_t *ids,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int limit)
{
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
				sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			return -1;
		}
	}

	return i;
}

static int
ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
					 unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, xstats is NULL and we have cleared the
	 * registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (xstats == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset = 0;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
		xstats[i].id = i;
	}

	return i;
}

static int
ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
					 unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, values is NULL and we have cleared the
	 * registers by reading them.
	 */
1652 	count = ngbe_xstats_calc_num(dev);
1653 	if (values == NULL)
1654 		return count;
1655 
1656 	limit = min(limit, ngbe_xstats_calc_num(dev));
1657 
1658 	/* Extended stats from ngbe_hw_stats */
1659 	for (i = 0; i < limit; i++) {
1660 		uint32_t offset;
1661 
1662 		if (ngbe_get_offset_by_id(i, &offset)) {
1663 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1664 			break;
1665 		}
1666 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1667 	}
1668 
1669 	return i;
1670 }
1671 
1672 static int
1673 ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1674 		uint64_t *values, unsigned int limit)
1675 {
1676 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1677 	unsigned int i;
1678 
1679 	if (ids == NULL)
1680 		return ngbe_dev_xstats_get_(dev, values, limit);
1681 
1682 	for (i = 0; i < limit; i++) {
1683 		uint32_t offset;
1684 
1685 		if (ngbe_get_offset_by_id(ids[i], &offset)) {
1686 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1687 			break;
1688 		}
1689 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1690 	}
1691 
1692 	return i;
1693 }
1694 
1695 static int
1696 ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
1697 {
1698 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1699 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1700 
1701 	/* HW registers are cleared on read */
1702 	hw->offset_loaded = 0;
1703 	ngbe_read_stats_registers(hw, hw_stats);
1704 	hw->offset_loaded = 1;
1705 
1706 	/* Reset software totals */
1707 	memset(hw_stats, 0, sizeof(*hw_stats));
1708 
1709 	return 0;
1710 }
1711 
1712 static int
1713 ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1714 {
1715 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1716 	int ret;
1717 
1718 	ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
1719 
1720 	if (ret < 0)
1721 		return -EINVAL;
1722 
1723 	ret += 1; /* add the size of '\0' */
1724 	if (fw_size < (size_t)ret)
1725 		return ret;
1726 
1727 	return 0;
1728 }
1729 
1730 static int
1731 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1732 {
1733 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1734 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1735 
1736 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1737 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1738 	dev_info->min_rx_bufsize = 1024;
1739 	dev_info->max_rx_pktlen = 15872;
1740 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1741 	dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
1742 	dev_info->max_vfs = pci_dev->max_vfs;
1743 	dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1744 	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1745 				     dev_info->rx_queue_offload_capa);
1746 	dev_info->tx_queue_offload_capa = 0;
1747 	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1748 
1749 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1750 		.rx_thresh = {
1751 			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
1752 			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
1753 			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
1754 		},
1755 		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1756 		.rx_drop_en = 0,
1757 		.offloads = 0,
1758 	};
1759 
1760 	dev_info->default_txconf = (struct rte_eth_txconf) {
1761 		.tx_thresh = {
1762 			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
1763 			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
1764 			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
1765 		},
1766 		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1767 		.offloads = 0,
1768 	};
1769 
1770 	dev_info->rx_desc_lim = rx_desc_lim;
1771 	dev_info->tx_desc_lim = tx_desc_lim;
1772 
1773 	dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
1774 	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
1775 	dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;
1776 
1777 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1778 				RTE_ETH_LINK_SPEED_10M;
1779 
1780 	/* Driver-preferred Rx/Tx parameters */
1781 	dev_info->default_rxportconf.burst_size = 32;
1782 	dev_info->default_txportconf.burst_size = 32;
1783 	dev_info->default_rxportconf.nb_queues = 1;
1784 	dev_info->default_txportconf.nb_queues = 1;
1785 	dev_info->default_rxportconf.ring_size = 256;
1786 	dev_info->default_txportconf.ring_size = 256;
1787 
1788 	return 0;
1789 }
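
/*
 * Example (illustrative): the limits and defaults filled in above are
 * visible to applications through the generic API:
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(0, &info) == 0)
 *		printf("max rxq %u, reta size %u\n",
 *		       info.max_rx_queues, info.reta_size);
 */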
1790 
1791 const uint32_t *
1792 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1793 {
1794 	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1795 	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1796 	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1797 	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1798 		return ngbe_get_supported_ptypes();
1799 
1800 	return NULL;
1801 }
1802 
1803 /* return 0 if the link status changed, -1 if it did not change */
1804 int
1805 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1806 			    int wait_to_complete)
1807 {
1808 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1809 	struct rte_eth_link link;
1810 	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1811 	u32 lan_speed = 0;
1812 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1813 	bool link_up;
1814 	int err;
1815 	int wait = 1;
1816 
1817 	memset(&link, 0, sizeof(link));
1818 	link.link_status = RTE_ETH_LINK_DOWN;
1819 	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1820 	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1821 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1822 			~RTE_ETH_LINK_SPEED_AUTONEG);
1823 
1824 	hw->mac.get_link_status = true;
1825 
1826 	if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
1827 		return rte_eth_linkstatus_set(dev, &link);
1828 
1829 	/* do not wait if not requested, or if the LSC interrupt is enabled */
1830 	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1831 		wait = 0;
1832 
1833 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1834 	if (err != 0) {
1835 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1836 		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1837 		return rte_eth_linkstatus_set(dev, &link);
1838 	}
1839 
1840 	if (!link_up)
1841 		return rte_eth_linkstatus_set(dev, &link);
1842 
1843 	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1844 	link.link_status = RTE_ETH_LINK_UP;
1845 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1846 
1847 	switch (link_speed) {
1848 	default:
1849 	case NGBE_LINK_SPEED_UNKNOWN:
1850 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1851 		break;
1852 
1853 	case NGBE_LINK_SPEED_10M_FULL:
1854 		link.link_speed = RTE_ETH_SPEED_NUM_10M;
1855 		lan_speed = 0;
1856 		break;
1857 
1858 	case NGBE_LINK_SPEED_100M_FULL:
1859 		link.link_speed = RTE_ETH_SPEED_NUM_100M;
1860 		lan_speed = 1;
1861 		break;
1862 
1863 	case NGBE_LINK_SPEED_1GB_FULL:
1864 		link.link_speed = RTE_ETH_SPEED_NUM_1G;
1865 		lan_speed = 2;
1866 		break;
1867 	}
1868 
1869 	if (hw->is_pf) {
1870 		wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1871 		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1872 				NGBE_LINK_SPEED_100M_FULL |
1873 				NGBE_LINK_SPEED_10M_FULL)) {
1874 			wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1875 				NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1876 		}
1877 	}
1878 
1879 	return rte_eth_linkstatus_set(dev, &link);
1880 }
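
/*
 * Example (illustrative): applications normally consume the result of
 * the update above via rte_eth_link_get() (which may wait for
 * completion) or rte_eth_link_get_nowait():
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(0, &link);
 *	if (link.link_status == RTE_ETH_LINK_UP)
 *		printf("link up at %u Mbps\n", link.link_speed);
 */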
1881 
1882 static int
1883 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1884 {
1885 	return ngbe_dev_link_update_share(dev, wait_to_complete);
1886 }
1887 
1888 static int
1889 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1890 {
1891 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1892 	uint32_t fctrl;
1893 
1894 	fctrl = rd32(hw, NGBE_PSRCTL);
1895 	fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
1896 	wr32(hw, NGBE_PSRCTL, fctrl);
1897 
1898 	return 0;
1899 }
1900 
1901 static int
1902 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
1903 {
1904 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1905 	uint32_t fctrl;
1906 
1907 	fctrl = rd32(hw, NGBE_PSRCTL);
1908 	fctrl &= (~NGBE_PSRCTL_UCP);
1909 	if (dev->data->all_multicast == 1)
1910 		fctrl |= NGBE_PSRCTL_MCP;
1911 	else
1912 		fctrl &= (~NGBE_PSRCTL_MCP);
1913 	wr32(hw, NGBE_PSRCTL, fctrl);
1914 
1915 	return 0;
1916 }
1917 
1918 static int
1919 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
1920 {
1921 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1922 	uint32_t fctrl;
1923 
1924 	fctrl = rd32(hw, NGBE_PSRCTL);
1925 	fctrl |= NGBE_PSRCTL_MCP;
1926 	wr32(hw, NGBE_PSRCTL, fctrl);
1927 
1928 	return 0;
1929 }
1930 
1931 static int
1932 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
1933 {
1934 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1935 	uint32_t fctrl;
1936 
1937 	if (dev->data->promiscuous == 1)
1938 		return 0; /* must remain in all_multicast mode */
1939 
1940 	fctrl = rd32(hw, NGBE_PSRCTL);
1941 	fctrl &= (~NGBE_PSRCTL_MCP);
1942 	wr32(hw, NGBE_PSRCTL, fctrl);
1943 
1944 	return 0;
1945 }
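
/*
 * Example (illustrative): the four filter toggles above map 1:1 to the
 * generic API:
 *
 *	rte_eth_promiscuous_enable(0);
 *	rte_eth_allmulticast_enable(0);
 *	rte_eth_promiscuous_disable(0);
 *
 * Note how the two states compose: promiscuous_disable above keeps
 * NGBE_PSRCTL_MCP set while all_multicast is still enabled, and
 * allmulticast_disable returns early while promiscuous mode is on.
 */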
1946 
1947 /**
1948  * It clears the interrupt causes and enables the interrupt.
1949  * It will be called only once during NIC initialization.
1950  *
1951  * @param dev
1952  *  Pointer to struct rte_eth_dev.
1953  * @param on
1954  *  Enable or Disable.
1955  *
1956  * @return
1957  *  - On success, zero.
1958  *  - On failure, a negative value.
1959  */
1960 static int
1961 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
1962 {
1963 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1964 
1965 	ngbe_dev_link_status_print(dev);
1966 	if (on != 0) {
1967 		intr->mask_misc |= NGBE_ICRMISC_PHY;
1968 		intr->mask_misc |= NGBE_ICRMISC_GPIO;
1969 	} else {
1970 		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
1971 		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
1972 	}
1973 
1974 	return 0;
1975 }
1976 
1977 /**
1978  * It clears the interrupt causes and enables the interrupt.
1979  * It will be called only once during NIC initialization.
1980  *
1981  * @param dev
1982  *  Pointer to struct rte_eth_dev.
1983  *
1984  * @return
1985  *  - On success, zero.
1986  *  - On failure, a negative value.
1987  */
1988 static int
1989 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
1990 {
1991 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1992 	u64 mask;
1993 
1994 	mask = NGBE_ICR_MASK;
1995 	mask &= (1ULL << NGBE_MISC_VEC_ID);
1996 	intr->mask |= mask;
1997 	intr->mask_misc |= NGBE_ICRMISC_GPIO;
1998 
1999 	return 0;
2000 }
2001 
2002 /**
2003  * It clears the interrupt causes and enables the interrupt.
2004  * It will be called only once during NIC initialization.
2005  *
2006  * @param dev
2007  *  Pointer to struct rte_eth_dev.
2008  *
2009  * @return
2010  *  - On success, zero.
2011  *  - On failure, a negative value.
2012  */
2013 static int
2014 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2015 {
2016 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2017 	u64 mask;
2018 
2019 	mask = NGBE_ICR_MASK;
2020 	mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
2021 	intr->mask |= mask;
2022 
2023 	return 0;
2024 }
2025 
2026 /**
2027  * It clears the interrupt causes and enables the interrupt.
2028  * It will be called only once during NIC initialization.
2029  *
2030  * @param dev
2031  *  Pointer to struct rte_eth_dev.
2032  *
2033  * @return
2034  *  - On success, zero.
2035  *  - On failure, a negative value.
2036  */
2037 static int
2038 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2039 {
2040 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2041 
2042 	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
2043 
2044 	return 0;
2045 }
2046 
2047 /**
2048  * It reads the ICR and sets the flags for link_update.
2049  *
2050  * @param dev
2051  *  Pointer to struct rte_eth_dev.
2052  *
2053  * @return
2054  *  - On success, zero.
2055  *  - On failure, a negative value.
2056  */
2057 static int
2058 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2059 {
2060 	uint32_t eicr;
2061 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2062 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2063 
2064 	/* clear all cause mask */
2065 	ngbe_disable_intr(hw);
2066 
2067 	/* read-on-clear nic registers here */
2068 	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2069 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2070 
2071 	intr->flags = 0;
2072 
2073 	/* set flag for async link update */
2074 	if (eicr & NGBE_ICRMISC_PHY)
2075 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2076 
2077 	if (eicr & NGBE_ICRMISC_VFMBX)
2078 		intr->flags |= NGBE_FLAG_MAILBOX;
2079 
2080 	if (eicr & NGBE_ICRMISC_LNKSEC)
2081 		intr->flags |= NGBE_FLAG_MACSEC;
2082 
2083 	if (eicr & NGBE_ICRMISC_GPIO)
2084 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2085 
2086 	return 0;
2087 }
2088 
2089 /**
2090  * It gets and then prints the link status.
2091  *
2092  * @param dev
2093  *  Pointer to struct rte_eth_dev.
2098  */
2099 static void
2100 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
2101 {
2102 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2103 	struct rte_eth_link link;
2104 
2105 	rte_eth_linkstatus_get(dev, &link);
2106 
2107 	if (link.link_status == RTE_ETH_LINK_UP) {
2108 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2109 					(int)(dev->data->port_id),
2110 					(unsigned int)link.link_speed,
2111 			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2112 					"full-duplex" : "half-duplex");
2113 	} else {
2114 		PMD_INIT_LOG(INFO, "Port %d: Link Down",
2115 				(int)(dev->data->port_id));
2116 	}
2117 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2118 				pci_dev->addr.domain,
2119 				pci_dev->addr.bus,
2120 				pci_dev->addr.devid,
2121 				pci_dev->addr.function);
2122 }
2123 
2124 /**
2125  * It executes link_update after an interrupt has occurred.
2126  *
2127  * @param dev
2128  *  Pointer to struct rte_eth_dev.
2129  *
2130  * @return
2131  *  - On success, zero.
2132  *  - On failure, a negative value.
2133  */
2134 static int
2135 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2136 {
2137 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2138 	int64_t timeout;
2139 
2140 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2141 
2142 	if (intr->flags & NGBE_FLAG_MAILBOX) {
2143 		ngbe_pf_mbx_process(dev);
2144 		intr->flags &= ~NGBE_FLAG_MAILBOX;
2145 	}
2146 
2147 	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2148 		struct rte_eth_link link;
2149 
2150 		/* get the link status before link update, for predicting later */
2151 		rte_eth_linkstatus_get(dev, &link);
2152 
2153 		ngbe_dev_link_update(dev, 0);
2154 
2155 		/* the link was down: it is likely to come back up */
2156 		if (link.link_status != RTE_ETH_LINK_UP)
2157 			/* handle it 1 sec later, waiting for it to become stable */
2158 			timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
2159 		/* the link was up: it is likely to go down */
2160 		else
2161 			/* handle it 4 sec later, waiting for it to become stable */
2162 			timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;
2163 
2164 		ngbe_dev_link_status_print(dev);
2165 		if (rte_eal_alarm_set(timeout * 1000,
2166 				      ngbe_dev_interrupt_delayed_handler,
2167 				      (void *)dev) < 0) {
2168 			PMD_DRV_LOG(ERR, "Error setting alarm");
2169 		} else {
2170 			/* remember original mask */
2171 			intr->mask_misc_orig = intr->mask_misc;
2172 			/* only disable lsc interrupt */
2173 			intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2174 
2175 			intr->mask_orig = intr->mask;
2176 			/* only disable all misc interrupts */
2177 			intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
2178 		}
2179 	}
2180 
2181 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
2182 	ngbe_enable_intr(dev);
2183 
2184 	return 0;
2185 }
2186 
2187 /**
2188  * Interrupt handler which shall be registered as an alarm callback for
2189  * delayed handling of a specific interrupt, waiting for the NIC state
2190  * to become stable. Since the ngbe interrupt state is not stable right
2191  * after the link goes down, it needs to wait 4 seconds for a stable status.
2192  *
2193  * @param param
2194  *  The address of parameter (struct rte_eth_dev *) registered before.
2195  */
2196 static void
2197 ngbe_dev_interrupt_delayed_handler(void *param)
2198 {
2199 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2200 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2201 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2202 	uint32_t eicr;
2203 
2204 	ngbe_disable_intr(hw);
2205 
2206 	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2207 	if (eicr & NGBE_ICRMISC_VFMBX)
2208 		ngbe_pf_mbx_process(dev);
2209 
2210 	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2211 		ngbe_dev_link_update(dev, 0);
2212 		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2213 		ngbe_dev_link_status_print(dev);
2214 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2215 					      NULL);
2216 	}
2217 
2218 	if (intr->flags & NGBE_FLAG_MACSEC) {
2219 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2220 					      NULL);
2221 		intr->flags &= ~NGBE_FLAG_MACSEC;
2222 	}
2223 
2224 	/* restore original mask */
2225 	intr->mask_misc = intr->mask_misc_orig;
2226 	intr->mask_misc_orig = 0;
2227 	intr->mask = intr->mask_orig;
2228 	intr->mask_orig = 0;
2229 
2230 	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2231 	ngbe_enable_intr(dev);
2232 }
2233 
2234 /**
2235  * Interrupt handler triggered by the NIC for handling a
2236  * specific interrupt.
2237  *
2238  * @param param
2239  *  The address of parameter (struct rte_eth_dev *) registered before.
2240  */
2241 static void
2242 ngbe_dev_interrupt_handler(void *param)
2243 {
2244 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2245 
2246 	ngbe_dev_interrupt_get_status(dev);
2247 	ngbe_dev_interrupt_action(dev);
2248 }
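
/*
 * Example (illustrative, on_link_change is a hypothetical user
 * function): the RTE_ETH_EVENT_INTR_LSC event raised by the delayed
 * handler above is consumed through a registered callback; for it to
 * fire, dev_conf.intr_conf.lsc must be set to 1 at configure time:
 *
 *	static int
 *	on_link_change(uint16_t port, enum rte_eth_event_type event,
 *		       void *cb_arg, void *ret_param)
 *	{
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(0, RTE_ETH_EVENT_INTR_LSC,
 *				      on_link_change, NULL);
 */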
2249 
2250 static int
2251 ngbe_dev_led_on(struct rte_eth_dev *dev)
2252 {
2253 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2254 	return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP;
2255 }
2256 
2257 static int
2258 ngbe_dev_led_off(struct rte_eth_dev *dev)
2259 {
2260 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2261 	return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP;
2262 }
2263 
2264 static int
2265 ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2266 {
2267 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2268 	uint32_t mflcn_reg;
2269 	uint32_t fccfg_reg;
2270 	int rx_pause;
2271 	int tx_pause;
2272 
2273 	fc_conf->pause_time = hw->fc.pause_time;
2274 	fc_conf->high_water = hw->fc.high_water;
2275 	fc_conf->low_water = hw->fc.low_water;
2276 	fc_conf->send_xon = hw->fc.send_xon;
2277 	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2278 
2279 	/*
2280 	 * Return rx_pause status according to actual setting of
2281 	 * RXFCCFG register.
2282 	 */
2283 	mflcn_reg = rd32(hw, NGBE_RXFCCFG);
2284 	if (mflcn_reg & NGBE_RXFCCFG_FC)
2285 		rx_pause = 1;
2286 	else
2287 		rx_pause = 0;
2288 
2289 	/*
2290 	 * Return tx_pause status according to actual setting of
2291 	 * TXFCCFG register.
2292 	 */
2293 	fccfg_reg = rd32(hw, NGBE_TXFCCFG);
2294 	if (fccfg_reg & NGBE_TXFCCFG_FC)
2295 		tx_pause = 1;
2296 	else
2297 		tx_pause = 0;
2298 
2299 	if (rx_pause && tx_pause)
2300 		fc_conf->mode = RTE_ETH_FC_FULL;
2301 	else if (rx_pause)
2302 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2303 	else if (tx_pause)
2304 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2305 	else
2306 		fc_conf->mode = RTE_ETH_FC_NONE;
2307 
2308 	return 0;
2309 }
2310 
2311 static int
2312 ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2313 {
2314 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2315 	int err;
2316 	uint32_t rx_buf_size;
2317 	uint32_t max_high_water;
2318 	enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = {
2319 		ngbe_fc_none,
2320 		ngbe_fc_rx_pause,
2321 		ngbe_fc_tx_pause,
2322 		ngbe_fc_full
2323 	};
2324 
2325 	PMD_INIT_FUNC_TRACE();
2326 
2327 	rx_buf_size = rd32(hw, NGBE_PBRXSIZE);
2328 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2329 
2330 	/*
2331 	 * Reserve at least one Ethernet frame for the watermark;
2332 	 * high_water/low_water are in kilobytes for ngbe.
2333 	 */
2334 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
2335 	if (fc_conf->high_water > max_high_water ||
2336 	    fc_conf->high_water < fc_conf->low_water) {
2337 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2338 		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
2339 		return -EINVAL;
2340 	}
2341 
2342 	hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode];
2343 	hw->fc.pause_time     = fc_conf->pause_time;
2344 	hw->fc.high_water     = fc_conf->high_water;
2345 	hw->fc.low_water      = fc_conf->low_water;
2346 	hw->fc.send_xon       = fc_conf->send_xon;
2347 	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
2348 
2349 	err = hw->mac.fc_enable(hw);
2350 
2351 	/* Not negotiated is not an error case */
2352 	if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) {
2353 		wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK,
2354 		      (fc_conf->mac_ctrl_frame_fwd
2355 		       ? NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP));
2356 		ngbe_flush(hw);
2357 
2358 		return 0;
2359 	}
2360 
2361 	PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err);
2362 	return -EIO;
2363 }
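
/*
 * Example (illustrative, watermark values are placeholders): per the
 * checks above, high_water must fit in the Rx packet buffer and exceed
 * low_water, both expressed in kilobytes:
 *
 *	struct rte_eth_fc_conf fc = {
 *		.mode = RTE_ETH_FC_FULL,
 *		.high_water = 0x20,
 *		.low_water = 0x10,
 *		.pause_time = 0x680,
 *		.autoneg = 1,
 *	};
 *
 *	rte_eth_dev_flow_ctrl_set(0, &fc);
 */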
2364 
2365 int
2366 ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2367 			  struct rte_eth_rss_reta_entry64 *reta_conf,
2368 			  uint16_t reta_size)
2369 {
2370 	uint8_t i, j, mask;
2371 	uint32_t reta;
2372 	uint16_t idx, shift;
2373 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2374 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2375 
2376 	PMD_INIT_FUNC_TRACE();
2377 
2378 	if (!hw->is_pf) {
2379 		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
2380 			"NIC.");
2381 		return -ENOTSUP;
2382 	}
2383 
2384 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2385 		PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
2386 			"(%d) doesn't match the number the hardware can support "
2387 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2388 		return -EINVAL;
2389 	}
2390 
2391 	for (i = 0; i < reta_size; i += 4) {
2392 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2393 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2394 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2395 		if (!mask)
2396 			continue;
2397 
2398 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2399 		for (j = 0; j < 4; j++) {
2400 			if (RS8(mask, j, 0x1)) {
2401 				reta  &= ~(MS32(8 * j, 0xFF));
2402 				reta |= LS32(reta_conf[idx].reta[shift + j],
2403 						8 * j, 0xFF);
2404 			}
2405 		}
2406 		wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2407 	}
2408 	adapter->rss_reta_updated = 1;
2409 
2410 	return 0;
2411 }
2412 
2413 int
2414 ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2415 			 struct rte_eth_rss_reta_entry64 *reta_conf,
2416 			 uint16_t reta_size)
2417 {
2418 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2419 	uint8_t i, j, mask;
2420 	uint32_t reta;
2421 	uint16_t idx, shift;
2422 
2423 	PMD_INIT_FUNC_TRACE();
2424 
2425 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2426 		PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
2427 			"(%d) doesn't match the number the hardware can support "
2428 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2429 		return -EINVAL;
2430 	}
2431 
2432 	for (i = 0; i < reta_size; i += 4) {
2433 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2434 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2435 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2436 		if (!mask)
2437 			continue;
2438 
2439 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2440 		for (j = 0; j < 4; j++) {
2441 			if (RS8(mask, j, 0x1))
2442 				reta_conf[idx].reta[shift + j] =
2443 					(uint16_t)RS32(reta, 8 * j, 0xFF);
2444 		}
2445 	}
2446 
2447 	return 0;
2448 }
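
/*
 * Example (illustrative): with a 128-entry table and 64-entry groups,
 * two rte_eth_rss_reta_entry64 slots cover the whole RETA; spreading
 * flows across two queues could look like:
 *
 *	struct rte_eth_rss_reta_entry64 conf[2];
 *	uint16_t i;
 *
 *	memset(conf, 0, sizeof(conf));
 *	for (i = 0; i < 128; i++) {
 *		conf[i / 64].mask |= 1ULL << (i % 64);
 *		conf[i / 64].reta[i % 64] = i % 2;
 *	}
 *	rte_eth_dev_rss_reta_update(0, conf, 128);
 */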
2449 
2450 static int
2451 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2452 				uint32_t index, uint32_t pool)
2453 {
2454 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2455 	uint32_t enable_addr = 1;
2456 
2457 	return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2458 			     pool, enable_addr);
2459 }
2460 
2461 static void
2462 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2463 {
2464 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2465 
2466 	ngbe_clear_rar(hw, index);
2467 }
2468 
2469 static int
2470 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2471 {
2472 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2473 
2474 	ngbe_remove_rar(dev, 0);
2475 	ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2476 
2477 	return 0;
2478 }
2479 
2480 static int
2481 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2482 {
2483 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2484 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4; /* VLAN */
2485 	struct rte_eth_dev_data *dev_data = dev->data;
2486 
2487 	/* If device is started, refuse mtu that requires the support of
2488 	 * scattered packets when this feature has not been enabled before.
2489 	 */
2490 	if (dev_data->dev_started && !dev_data->scattered_rx &&
2491 	    (frame_size + 2 * RTE_VLAN_HLEN >
2492 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2493 		PMD_INIT_LOG(ERR, "Stop port first.");
2494 		return -EINVAL;
2495 	}
2496 
2497 	if (hw->mode)
2498 		wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2499 			NGBE_FRAME_SIZE_MAX);
2500 	else
2501 		wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2502 			NGBE_FRMSZ_MAX(frame_size));
2503 
2504 	return 0;
2505 }
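
/*
 * Example (illustrative): raising the MTU to 9000 with the port
 * stopped, so that the scattered-Rx check above cannot reject it:
 *
 *	rte_eth_dev_stop(0);
 *	rte_eth_dev_set_mtu(0, 9000);
 *	rte_eth_dev_start(0);
 */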
2506 
2507 static uint32_t
2508 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2509 {
2510 	uint32_t vector = 0;
2511 
2512 	switch (hw->mac.mc_filter_type) {
2513 	case 0:   /* use bits [47:36] of the address */
2514 		vector = ((uc_addr->addr_bytes[4] >> 4) |
2515 			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
2516 		break;
2517 	case 1:   /* use bits [46:35] of the address */
2518 		vector = ((uc_addr->addr_bytes[4] >> 3) |
2519 			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
2520 		break;
2521 	case 2:   /* use bits [45:34] of the address */
2522 		vector = ((uc_addr->addr_bytes[4] >> 2) |
2523 			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
2524 		break;
2525 	case 3:   /* use bits [43:32] of the address */
2526 		vector = ((uc_addr->addr_bytes[4]) |
2527 			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
2528 		break;
2529 	default:  /* Invalid mc_filter_type */
2530 		break;
2531 	}
2532 
2533 	/* vector can only be 12-bits or boundary will be exceeded */
2534 	/* vector can only be 12 bits or the boundary will be exceeded */
2535 	return vector;
2536 }
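
/*
 * Worked example (illustrative): for MAC 00:11:22:33:44:55 with
 * mc_filter_type 0, addr_bytes[4] = 0x44 and addr_bytes[5] = 0x55, so
 *
 *	vector   = ((0x44 >> 4) | (0x55 << 4)) & 0xFFF = 0x554
 *	uta_idx  = (0x554 >> 5) & 0x7F = 0x2A
 *	uta_mask = 1 << (0x554 & 0x1F) = 1 << 20
 *
 * which is how ngbe_uc_hash_table_set() below picks the UCADDRTBL
 * word and bit for that address.
 */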
2537 
2538 static int
2539 ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2540 			struct rte_ether_addr *mac_addr, uint8_t on)
2541 {
2542 	uint32_t vector;
2543 	uint32_t uta_idx;
2544 	uint32_t reg_val;
2545 	uint32_t uta_mask;
2546 	uint32_t psrctl;
2547 
2548 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2549 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2550 
2551 	vector = ngbe_uta_vector(hw, mac_addr);
2552 	uta_idx = (vector >> 5) & 0x7F;
2553 	uta_mask = 0x1UL << (vector & 0x1F);
2554 
2555 	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2556 		return 0;
2557 
2558 	reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2559 	if (on) {
2560 		uta_info->uta_in_use++;
2561 		reg_val |= uta_mask;
2562 		uta_info->uta_shadow[uta_idx] |= uta_mask;
2563 	} else {
2564 		uta_info->uta_in_use--;
2565 		reg_val &= ~uta_mask;
2566 		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2567 	}
2568 
2569 	wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2570 
2571 	psrctl = rd32(hw, NGBE_PSRCTL);
2572 	if (uta_info->uta_in_use > 0)
2573 		psrctl |= NGBE_PSRCTL_UCHFENA;
2574 	else
2575 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2576 
2577 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2578 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2579 	wr32(hw, NGBE_PSRCTL, psrctl);
2580 
2581 	return 0;
2582 }
2583 
2584 static int
2585 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2586 {
2587 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2588 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2589 	uint32_t psrctl;
2590 	int i;
2591 
2592 	if (on) {
2593 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2594 			uta_info->uta_shadow[i] = ~0;
2595 			wr32(hw, NGBE_UCADDRTBL(i), ~0);
2596 		}
2597 	} else {
2598 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2599 			uta_info->uta_shadow[i] = 0;
2600 			wr32(hw, NGBE_UCADDRTBL(i), 0);
2601 		}
2602 	}
2603 
2604 	psrctl = rd32(hw, NGBE_PSRCTL);
2605 	if (on)
2606 		psrctl |= NGBE_PSRCTL_UCHFENA;
2607 	else
2608 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2609 
2610 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2611 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2612 	wr32(hw, NGBE_PSRCTL, psrctl);
2613 
2614 	return 0;
2615 }
2616 
2617 /**
2618  * Set the IVAR registers, mapping interrupt causes to vectors
2619  * @param hw
2620  *  pointer to ngbe_hw struct
2621  * @param direction
2622  *  0 for Rx, 1 for Tx, -1 for other causes
2623  * @param queue
2624  *  queue to map the corresponding interrupt to
2625  * @param msix_vector
2626  *  the vector to map to the corresponding queue
2627  */
2628 void
2629 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2630 		   uint8_t queue, uint8_t msix_vector)
2631 {
2632 	uint32_t tmp, idx;
2633 
2634 	if (direction == -1) {
2635 		/* other causes */
2636 		msix_vector |= NGBE_IVARMISC_VLD;
2637 		idx = 0;
2638 		tmp = rd32(hw, NGBE_IVARMISC);
2639 		tmp &= ~(0xFF << idx);
2640 		tmp |= (msix_vector << idx);
2641 		wr32(hw, NGBE_IVARMISC, tmp);
2642 	} else {
2643 		/* rx or tx causes */
2644 		/* Workaround for lost ICR */
2645 		idx = ((16 * (queue & 1)) + (8 * direction));
2646 		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2647 		tmp &= ~(0xFF << idx);
2648 		tmp |= (msix_vector << idx);
2649 		wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2650 	}
2651 }
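
/*
 * Worked example (illustrative): mapping Rx queue 3 (direction 0)
 * lands in IVAR(3 >> 1) = IVAR(1) at offset
 * idx = 16 * (3 & 1) + 8 * 0 = 16, i.e. bits [23:16] of the register.
 */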
2652 
2653 /**
2654  * Sets up the hardware to properly generate MSI-X interrupts
2655  * @param dev
2656  *  Pointer to struct rte_eth_dev.
2657  */
2658 static void
2659 ngbe_configure_msix(struct rte_eth_dev *dev)
2660 {
2661 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2662 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2663 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2664 	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2665 	uint32_t vec = NGBE_MISC_VEC_ID;
2666 	uint32_t gpie;
2667 
2668 	/*
2669 	 * Don't configure the MSI-X register if no mapping is done
2670 	 * between the intr vector and the event fd;
2671 	 * but if MSI-X has already been enabled, auto clean,
2672 	 * auto mask and throttling still need to be configured.
2673 	 */
2674 	gpie = rd32(hw, NGBE_GPIE);
2675 	if (!rte_intr_dp_is_en(intr_handle) &&
2676 	    !(gpie & NGBE_GPIE_MSIX))
2677 		return;
2678 
2679 	if (rte_intr_allow_others(intr_handle)) {
2680 		base = NGBE_RX_VEC_START;
2681 		vec = base;
2682 	}
2683 
2684 	/* setup GPIE for MSI-X mode */
2685 	gpie = rd32(hw, NGBE_GPIE);
2686 	gpie |= NGBE_GPIE_MSIX;
2687 	wr32(hw, NGBE_GPIE, gpie);
2688 
2689 	/* Populate the IVAR table and set the ITR values to the
2690 	 * corresponding register.
2691 	 */
2692 	if (rte_intr_dp_is_en(intr_handle)) {
2693 		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2694 			queue_id++) {
2695 			/* by default, 1:1 mapping */
2696 			ngbe_set_ivar_map(hw, 0, queue_id, vec);
2697 			rte_intr_vec_list_index_set(intr_handle,
2698 							   queue_id, vec);
2699 			if (vec < base + rte_intr_nb_efd_get(intr_handle)
2700 			    - 1)
2701 				vec++;
2702 		}
2703 
2704 		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2705 	}
2706 	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2707 			NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2708 			| NGBE_ITR_WRDSA);
2709 }
2710 
2711 static u8 *
2712 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2713 			u8 **mc_addr_ptr, u32 *vmdq)
2714 {
2715 	u8 *mc_addr;
2716 
2717 	*vmdq = 0;
2718 	mc_addr = *mc_addr_ptr;
2719 	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2720 	return mc_addr;
2721 }
2722 
2723 int
2724 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2725 			  struct rte_ether_addr *mc_addr_set,
2726 			  uint32_t nb_mc_addr)
2727 {
2728 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2729 	u8 *mc_addr_list;
2730 
2731 	mc_addr_list = (u8 *)mc_addr_set;
2732 	return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2733 					 ngbe_dev_addr_list_itr, TRUE);
2734 }
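
/*
 * Example (illustrative): installing two multicast filters through the
 * generic API, which ends up in the iterator above:
 *
 *	struct rte_ether_addr mc[2] = {
 *		{ .addr_bytes = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01} },
 *		{ .addr_bytes = {0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb} },
 *	};
 *
 *	rte_eth_dev_set_mc_addr_list(0, mc, 2);
 */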
2735 
2736 static uint64_t
2737 ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
2738 {
2739 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2740 	uint64_t systime_cycles;
2741 
2742 	systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL);
2743 	systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32;
2744 
2745 	return systime_cycles;
2746 }
2747 
2748 static uint64_t
2749 ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2750 {
2751 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2752 	uint64_t rx_tstamp_cycles;
2753 
2754 	/* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
2755 	rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL);
2756 	rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32;
2757 
2758 	return rx_tstamp_cycles;
2759 }
2760 
2761 static uint64_t
2762 ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2763 {
2764 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2765 	uint64_t tx_tstamp_cycles;
2766 
2767 	/* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
2768 	tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL);
2769 	tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32;
2770 
2771 	return tx_tstamp_cycles;
2772 }
2773 
2774 static void
2775 ngbe_start_timecounters(struct rte_eth_dev *dev)
2776 {
2777 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2778 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2779 	uint32_t incval = 0;
2780 	uint32_t shift = 0;
2781 
2782 	incval = NGBE_INCVAL_1GB;
2783 	shift = NGBE_INCVAL_SHIFT_1GB;
2784 
2785 	wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval));
2786 
2787 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
2788 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2789 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2790 
2791 	adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2792 	adapter->systime_tc.cc_shift = shift;
2793 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
2794 
2795 	adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2796 	adapter->rx_tstamp_tc.cc_shift = shift;
2797 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2798 
2799 	adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2800 	adapter->tx_tstamp_tc.cc_shift = shift;
2801 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2802 }
2803 
2804 static int
2805 ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
2806 {
2807 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2808 
2809 	adapter->systime_tc.nsec += delta;
2810 	adapter->rx_tstamp_tc.nsec += delta;
2811 	adapter->tx_tstamp_tc.nsec += delta;
2812 
2813 	return 0;
2814 }
2815 
2816 static int
2817 ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
2818 {
2819 	uint64_t ns;
2820 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2821 
2822 	ns = rte_timespec_to_ns(ts);
2823 	/* Set the timecounters to a new value. */
2824 	adapter->systime_tc.nsec = ns;
2825 	adapter->rx_tstamp_tc.nsec = ns;
2826 	adapter->tx_tstamp_tc.nsec = ns;
2827 
2828 	return 0;
2829 }
2830 
2831 static int
2832 ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
2833 {
2834 	uint64_t ns, systime_cycles;
2835 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2836 
2837 	systime_cycles = ngbe_read_systime_cyclecounter(dev);
2838 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
2839 	*ts = rte_ns_to_timespec(ns);
2840 
2841 	return 0;
2842 }
2843 
2844 static int
2845 ngbe_timesync_enable(struct rte_eth_dev *dev)
2846 {
2847 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2848 	uint32_t tsync_ctl;
2849 
2850 	/* Stop the timesync system time. */
2851 	wr32(hw, NGBE_TSTIMEINC, 0x0);
2852 	/* Reset the timesync system time value. */
2853 	wr32(hw, NGBE_TSTIMEL, 0x0);
2854 	wr32(hw, NGBE_TSTIMEH, 0x0);
2855 
2856 	ngbe_start_timecounters(dev);
2857 
2858 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2859 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588),
2860 		RTE_ETHER_TYPE_1588 | NGBE_ETFLT_ENA | NGBE_ETFLT_1588);
2861 
2862 	/* Enable timestamping of received PTP packets. */
2863 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2864 	tsync_ctl |= NGBE_TSRXCTL_ENA;
2865 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2866 
2867 	/* Enable timestamping of transmitted PTP packets. */
2868 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2869 	tsync_ctl |= NGBE_TSTXCTL_ENA;
2870 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2871 
2872 	ngbe_flush(hw);
2873 
2874 	return 0;
2875 }
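
/*
 * Example (illustrative): a PTP-aware application drives the block
 * above and its counterpart below through the generic timesync API:
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(0);
 *	rte_eth_timesync_read_time(0, &ts);
 *	rte_eth_timesync_adjust_time(0, 1000);
 *
 * The last call shifts the three timecounters forward by 1000 ns, as
 * implemented in ngbe_timesync_adjust_time() above.
 */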
2876 
2877 static int
2878 ngbe_timesync_disable(struct rte_eth_dev *dev)
2879 {
2880 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2881 	uint32_t tsync_ctl;
2882 
2883 	/* Disable timestamping of transmitted PTP packets. */
2884 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2885 	tsync_ctl &= ~NGBE_TSTXCTL_ENA;
2886 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2887 
2888 	/* Disable timestamping of received PTP packets. */
2889 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2890 	tsync_ctl &= ~NGBE_TSRXCTL_ENA;
2891 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2892 
2893 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2894 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);
2895 
2896 	/* Stop incrementing the System Time registers. */
2897 	wr32(hw, NGBE_TSTIMEINC, 0);
2898 
2899 	return 0;
2900 }
2901 
2902 static int
2903 ngbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
2904 				 struct timespec *timestamp,
2905 				 uint32_t flags __rte_unused)
2906 {
2907 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2908 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2909 	uint32_t tsync_rxctl;
2910 	uint64_t rx_tstamp_cycles;
2911 	uint64_t ns;
2912 
2913 	tsync_rxctl = rd32(hw, NGBE_TSRXCTL);
2914 	if ((tsync_rxctl & NGBE_TSRXCTL_VLD) == 0)
2915 		return -EINVAL;
2916 
2917 	rx_tstamp_cycles = ngbe_read_rx_tstamp_cyclecounter(dev);
2918 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
2919 	*timestamp = rte_ns_to_timespec(ns);
2920 
2921 	return  0;
2922 }
2923 
2924 static int
2925 ngbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
2926 				 struct timespec *timestamp)
2927 {
2928 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2929 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2930 	uint32_t tsync_txctl;
2931 	uint64_t tx_tstamp_cycles;
2932 	uint64_t ns;
2933 
2934 	tsync_txctl = rd32(hw, NGBE_TSTXCTL);
2935 	if ((tsync_txctl & NGBE_TSTXCTL_VLD) == 0)
2936 		return -EINVAL;
2937 
2938 	tx_tstamp_cycles = ngbe_read_tx_tstamp_cyclecounter(dev);
2939 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
2940 	*timestamp = rte_ns_to_timespec(ns);
2941 
2942 	return 0;
2943 }
2944 
2945 static int
2946 ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
2947 {
2948 	int count = 0;
2949 	int g_ind = 0;
2950 	const struct reg_info *reg_group;
2951 	const struct reg_info **reg_set = ngbe_regs_others;
2952 
2953 	while ((reg_group = reg_set[g_ind++]))
2954 		count += ngbe_regs_group_count(reg_group);
2955 
2956 	return count;
2957 }
2958 
2959 static int
2960 ngbe_get_regs(struct rte_eth_dev *dev,
2961 	      struct rte_dev_reg_info *regs)
2962 {
2963 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2964 	uint32_t *data = regs->data;
2965 	int g_ind = 0;
2966 	int count = 0;
2967 	const struct reg_info *reg_group;
2968 	const struct reg_info **reg_set = ngbe_regs_others;
2969 
2970 	if (data == NULL) {
2971 		regs->length = ngbe_get_reg_length(dev);
2972 		regs->width = sizeof(uint32_t);
2973 		return 0;
2974 	}
2975 
2976 	/* Support only full register dump */
2977 	if (regs->length == 0 ||
2978 	    regs->length == (uint32_t)ngbe_get_reg_length(dev)) {
2979 		regs->version = hw->mac.type << 24 |
2980 				hw->revision_id << 16 |
2981 				hw->device_id;
2982 		while ((reg_group = reg_set[g_ind++]))
2983 			count += ngbe_read_regs_group(dev, &data[count],
2984 						      reg_group);
2985 		return 0;
2986 	}
2987 
2988 	return -ENOTSUP;
2989 }
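
/*
 * Example (illustrative): the NULL-data convention above enables the
 * usual two-call pattern, since only full dumps are supported:
 *
 *	struct rte_dev_reg_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	rte_eth_dev_get_reg_info(0, &info);
 *	info.data = calloc(info.length, info.width);
 *	rte_eth_dev_get_reg_info(0, &info);
 *
 * The first call only fills in length and width; the second performs
 * the dump.
 */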
2990 
2991 static int
2992 ngbe_get_eeprom_length(struct rte_eth_dev *dev)
2993 {
2994 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2995 
2996 	/* Return unit is byte count */
2997 	return hw->rom.word_size * 2;
2998 }
2999 
3000 static int
3001 ngbe_get_eeprom(struct rte_eth_dev *dev,
3002 		struct rte_dev_eeprom_info *in_eeprom)
3003 {
3004 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3005 	struct ngbe_rom_info *eeprom = &hw->rom;
3006 	uint16_t *data = in_eeprom->data;
3007 	int first, length;
3008 
3009 	first = in_eeprom->offset >> 1;
3010 	length = in_eeprom->length >> 1;
3011 	if (first > hw->rom.word_size ||
3012 	    ((first + length) > hw->rom.word_size))
3013 		return -EINVAL;
3014 
3015 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3016 
3017 	return eeprom->readw_buffer(hw, first, length, data);
3018 }
3019 
3020 static int
3021 ngbe_set_eeprom(struct rte_eth_dev *dev,
3022 		struct rte_dev_eeprom_info *in_eeprom)
3023 {
3024 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3025 	struct ngbe_rom_info *eeprom = &hw->rom;
3026 	uint16_t *data = in_eeprom->data;
3027 	int first, length;
3028 
3029 	first = in_eeprom->offset >> 1;
3030 	length = in_eeprom->length >> 1;
3031 	if (first > hw->rom.word_size ||
3032 	    ((first + length) > hw->rom.word_size))
3033 		return -EINVAL;
3034 
3035 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3036 
3037 	return eeprom->writew_buffer(hw, first, length, data);
3038 }
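
/*
 * Example (illustrative): reading the first 32 bytes of the EEPROM;
 * offset and length are given in bytes and halved into word units by
 * the helpers above:
 *
 *	uint16_t buf[16];
 *	struct rte_dev_eeprom_info ee = {
 *		.data = buf,
 *		.offset = 0,
 *		.length = sizeof(buf),
 *	};
 *
 *	rte_eth_dev_get_eeprom(0, &ee);
 */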
3039 
3040 static const struct eth_dev_ops ngbe_eth_dev_ops = {
3041 	.dev_configure              = ngbe_dev_configure,
3042 	.dev_infos_get              = ngbe_dev_info_get,
3043 	.dev_start                  = ngbe_dev_start,
3044 	.dev_stop                   = ngbe_dev_stop,
3045 	.dev_close                  = ngbe_dev_close,
3046 	.dev_reset                  = ngbe_dev_reset,
3047 	.promiscuous_enable         = ngbe_dev_promiscuous_enable,
3048 	.promiscuous_disable        = ngbe_dev_promiscuous_disable,
3049 	.allmulticast_enable        = ngbe_dev_allmulticast_enable,
3050 	.allmulticast_disable       = ngbe_dev_allmulticast_disable,
3051 	.link_update                = ngbe_dev_link_update,
3052 	.stats_get                  = ngbe_dev_stats_get,
3053 	.xstats_get                 = ngbe_dev_xstats_get,
3054 	.xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
3055 	.stats_reset                = ngbe_dev_stats_reset,
3056 	.xstats_reset               = ngbe_dev_xstats_reset,
3057 	.xstats_get_names           = ngbe_dev_xstats_get_names,
3058 	.xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
3059 	.fw_version_get             = ngbe_fw_version_get,
3060 	.dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
3061 	.mtu_set                    = ngbe_dev_mtu_set,
3062 	.vlan_filter_set            = ngbe_vlan_filter_set,
3063 	.vlan_tpid_set              = ngbe_vlan_tpid_set,
3064 	.vlan_offload_set           = ngbe_vlan_offload_set,
3065 	.vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
3066 	.rx_queue_start	            = ngbe_dev_rx_queue_start,
3067 	.rx_queue_stop              = ngbe_dev_rx_queue_stop,
3068 	.tx_queue_start	            = ngbe_dev_tx_queue_start,
3069 	.tx_queue_stop              = ngbe_dev_tx_queue_stop,
3070 	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
3071 	.rx_queue_release           = ngbe_dev_rx_queue_release,
3072 	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
3073 	.tx_queue_release           = ngbe_dev_tx_queue_release,
3074 	.dev_led_on                 = ngbe_dev_led_on,
3075 	.dev_led_off                = ngbe_dev_led_off,
3076 	.flow_ctrl_get              = ngbe_flow_ctrl_get,
3077 	.flow_ctrl_set              = ngbe_flow_ctrl_set,
3078 	.mac_addr_add               = ngbe_add_rar,
3079 	.mac_addr_remove            = ngbe_remove_rar,
3080 	.mac_addr_set               = ngbe_set_default_mac_addr,
3081 	.uc_hash_table_set          = ngbe_uc_hash_table_set,
3082 	.uc_all_hash_table_set      = ngbe_uc_all_hash_table_set,
3083 	.reta_update                = ngbe_dev_rss_reta_update,
3084 	.reta_query                 = ngbe_dev_rss_reta_query,
3085 	.rss_hash_update            = ngbe_dev_rss_hash_update,
3086 	.rss_hash_conf_get          = ngbe_dev_rss_hash_conf_get,
3087 	.set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
3088 	.rxq_info_get               = ngbe_rxq_info_get,
3089 	.txq_info_get               = ngbe_txq_info_get,
3090 	.rx_burst_mode_get          = ngbe_rx_burst_mode_get,
3091 	.tx_burst_mode_get          = ngbe_tx_burst_mode_get,
3092 	.timesync_enable            = ngbe_timesync_enable,
3093 	.timesync_disable           = ngbe_timesync_disable,
3094 	.timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp,
3095 	.timesync_read_tx_timestamp = ngbe_timesync_read_tx_timestamp,
3096 	.get_reg                    = ngbe_get_regs,
3097 	.get_eeprom_length          = ngbe_get_eeprom_length,
3098 	.get_eeprom                 = ngbe_get_eeprom,
3099 	.set_eeprom                 = ngbe_set_eeprom,
3100 	.timesync_adjust_time       = ngbe_timesync_adjust_time,
3101 	.timesync_read_time         = ngbe_timesync_read_time,
3102 	.timesync_write_time        = ngbe_timesync_write_time,
3103 	.tx_done_cleanup            = ngbe_dev_tx_done_cleanup,
3104 };
3105 
3106 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
3107 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
3108 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
3109 
3110 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
3111 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
3112 
3113 #ifdef RTE_ETHDEV_DEBUG_RX
3114 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
3115 #endif
3116 #ifdef RTE_ETHDEV_DEBUG_TX
3117 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
3118 #endif
3119