/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"
#include "ngbe_regs_group.h"

static const struct reg_info ngbe_regs_general[] = {
	{NGBE_RST, 1, 1, "NGBE_RST"},
	{NGBE_STAT, 1, 1, "NGBE_STAT"},
	{NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
	{NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
	{NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
	{NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_nvm[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_interrupt[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_fctl_others[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rxdma[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_tx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_wakeup[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_mac[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_diagnostic[] = {
	{0, 0, 0, ""},
};

/* PF registers */
static const struct reg_info *ngbe_regs_others[] = {
				ngbe_regs_general,
				ngbe_regs_nvm,
				ngbe_regs_interrupt,
				ngbe_regs_fctl_others,
				ngbe_regs_rxdma,
				ngbe_regs_rx,
				ngbe_regs_tx,
				ngbe_regs_wakeup,
				ngbe_regs_mac,
				ngbe_regs_diagnostic,
				NULL};

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
					uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);

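/*
 * The HWSTRIP macros below track per-queue VLAN strip state in a bitmap:
 * each bitmap word holds sizeof(bitmap[0]) * NBBY bits (NBBY is the number
 * of bits per byte), so queue q maps to word q / bits-per-word and to bit
 * q % bits-per-word within that word.
 */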
#define NGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_TXD_ALIGN,
	.nb_seg_max = NGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
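/*
 * Each entry pairs an xstat name with the byte offset of the matching field
 * inside struct ngbe_hw_stats, so the generic xstats accessors further down
 * can fetch any counter without per-stat code.
 */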
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
	/* MNG RxTx */
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),
	/* Basic RxTx */
	HW_XSTAT(rx_packets),
	HW_XSTAT(tx_packets),
	HW_XSTAT(rx_bytes),
	HW_XSTAT(tx_bytes),
	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),

	/* Basic Error */
	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_errors),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	/* MACSEC */
	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	/* MAC RxTx */
	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	/* Flow Control */
	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
			   sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
			   sizeof(rte_ngbe_qp_strings[0]))
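/*
 * QP_XSTAT offsets are relative to qp[0]; ngbe_get_offset_by_id() adds a
 * stride of NGBE_NB_QP_STATS * sizeof(uint64_t) per queue to reach the
 * counters of queue n.
 */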

static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	if (status == NGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}

static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	wr32(hw, NGBE_IENMISC, intr->mask_misc);
	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
	ngbe_flush(hw);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
	ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These locks are trickier since they are common to all ports; but
	 * swfw_sync retries long enough (1s) to be almost sure that if the
	 * lock cannot be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = NGBE_MNGSEM_SWPHY |
	       NGBE_MNGSEM_SWMBX |
	       NGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}

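/*
 * Per-port initialization: set up the ethdev ops and fast-path burst
 * functions, bring the hardware through the base-code reset sequence, and
 * allocate the MAC address tables. Secondary processes only pick up the
 * Rx/Tx functions selected by the primary.
 */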
static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	u32 led_conf = 0;
	int err, ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ngbe_eth_dev_ops;
	eth_dev->rx_queue_count       = ngbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as the
	 * primary process has already done this work. We only check that we
	 * don't need a different Rx or Tx function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ngbe_tx_queue *txq;
		/* The Tx function in the primary process is set by the last
		 * queue initialized; Tx queues may not have been initialized
		 * by the primary process yet.
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			ngbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default Tx function if we get here */
			PMD_INIT_LOG(NOTICE,
				"No Tx queues configured yet. Using default Tx function.");
		}

		ngbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->sub_system_id = pci_dev->id.subsystem_device_id;
	ngbe_map_device_id(hw);
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = ngbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ngbe_fc_full;
	hw->fc.current_mode = ngbe_fc_full;
	hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
	hw->fc.low_water = NGBE_FC_XON_LOTH;
	hw->fc.high_water = NGBE_FC_XOFF_HITH;
	hw->fc.send_xon = 1;

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, NULL);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->phy.led_oem_chk(hw, &led_conf);
	if (err == 0)
		hw->led_conf = led_conf;
	else
		hw->led_conf = 0xFFFF;

	err = hw->mac.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	ngbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ngbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the HW strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ret = ngbe_pf_host_init(eth_dev);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		return ret;
	}

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			(int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ngbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ngbe_enable_intr(eth_dev);

	return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_dev_close(eth_dev);

	return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct ngbe_adapter),
			eth_dev_pci_specific_init, pci_dev,
			eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (ethdev == NULL)
		return 0;

	return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
	.id_table = pci_id_ngbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ngbe_pci_probe,
	.remove = eth_ngbe_pci_remove,
};

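/*
 * The VLAN filter table (VFTA) is an array of 32-bit words: bits 5-11 of
 * the VLAN id select the word (128 words) and bits 0-4 select the bit
 * within it.
 */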
static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
	vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

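/*
 * Changing the per-ring VLAN strip bit only takes effect while the ring is
 * disabled, so when the ring is enabled and the bit actually changes, the
 * queue is stopped, reprogrammed and restarted below.
 */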
static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	if (on)
		ngbe_vlan_hw_strip_enable(dev, queue);
	else
		ngbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			!(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg |= NGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg &= ~NGBE_RXCFG_VLAN;
	}
	rxcfg &= ~NGBE_RXCFG_ENA;

	if (restart) {
		/* set vlan strip for ring */
		ngbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
		ngbe_dev_rx_queue_start(dev, queue);
	}
}

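/*
 * Which TPID register applies depends on whether VLAN extension (and QinQ
 * on top of it) is enabled in NGBE_PORTCTL, so the current mode is decoded
 * first and the matching register is programmed.
 */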
static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, NGBE_PORTCTL);

	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR,
				"Inner type is not supported by single VLAN");
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_LSB_MASK,
				NGBE_TAGTPID_LSB(tpid));
		}
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16 bits are valid */
			wr32m(hw, NGBE_EXTAG,
				NGBE_EXTAG_VLAN_MASK,
				NGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_MSB_MASK,
				NGBE_TAGTPID_MSB(tpid));
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}

void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);
}

void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_CFIENA;
	vlnctrl |= NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < NGBE_VFTA_SIZE; i++)
		wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
	struct ngbe_rx_queue *rxq;

	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		NGBE_SET_HWSTRIP(hwstrip, queue);
	else
		NGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl &= ~NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record this setting in the per-queue HW strip bitmap */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl |= NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record this setting in the per-queue HW strip bitmap */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_VLANEXT;
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl  = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl  = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct ngbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			ngbe_vlan_hw_strip_enable(dev, i);
		else
			ngbe_vlan_hw_strip_disable(dev, i);
	}
}

void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct ngbe_rx_queue *rxq;

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & RTE_ETH_VLAN_STRIP_MASK)
		ngbe_vlan_hw_strip_config(dev);

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			ngbe_vlan_hw_filter_enable(dev);
		else
			ngbe_vlan_hw_filter_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
			ngbe_vlan_hw_extend_enable(dev);
		else
			ngbe_vlan_hw_extend_disable(dev);
	}

	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
			ngbe_qinq_hw_strip_enable(dev);
		else
			ngbe_qinq_hw_strip_disable(dev);
	}

	return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	ngbe_config_vlan_strip_on_all_queues(dev, mask);

	ngbe_vlan_offload_config(dev, mask);

	return 0;
}

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	/* set flag to update link status after init */
	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue fails to meet the bulk
	 * allocation preconditions, this flag will be reset.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}

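/*
 * GPIO-based PHY interrupt setup: the GPIO interrupt polarity is programmed
 * differently for the yt8521s SFI PHY than for the other PHY types, and the
 * GPIO source is added to the misc interrupt mask.
 */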
static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
	else
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

	intr->mask_misc |= NGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = false;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* Stop the link setup handler before resetting the HW. */
	rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;

	/* reinitialize adapter, this calls reset and start */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = ngbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	ngbe_set_pcie_master(hw, true);

	/* configure PF module if SRIOV enabled */
	ngbe_pf_host_configure(dev);

	ngbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
						   dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate %d rx_queues intr_vec",
				     dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until Rx interrupt */
	ngbe_configure_msix(dev);

	/* initialize transmission unit */
	ngbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ngbe_dev_rx_init(dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
		goto error;
	}

	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
		RTE_ETH_VLAN_EXTEND_MASK;
	err = ngbe_vlan_offload_config(dev, mask);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	hw->mac.setup_pba(hw);
	ngbe_configure_port(dev);

	err = ngbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err != 0)
		goto error;
	dev->data->dev_link.link_status = link_up;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		negotiate = true;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err != 0)
		goto error;

	allowed_speeds = 0;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = hw->mac.default_speeds;
	} else {
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= NGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= NGBE_LINK_SPEED_100M_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
			speed |= NGBE_LINK_SPEED_10M_FULL;
	}

	err = hw->phy.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "PHY init failed");
		goto error;
	}
	err = hw->mac.setup_link(hw, speed, link_up);
	if (err != 0)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		ngbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
		ngbe_dev_macsec_interrupt_setup(dev);
		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     ngbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO,
				     "LSC won't enable because of no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		ngbe_dev_rxq_interrupt_setup(dev);

	/* enable UIO/VFIO intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since HW reset */
	ngbe_enable_intr(dev);

	if (hw->gpio_ctl) {
		/* GPIO 0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, 0);
	}

	/*
	 * Update the link status right before returning, because it may
	 * start the link configuration process in a separate thread.
	 */
	ngbe_dev_link_update(dev, 0);

	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	ngbe_dev_clear_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int vf;

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev);

	if (hw->gpio_ctl) {
		/* GPIO 0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
	}

	/* disable interrupts */
	ngbe_disable_intr(hw);

	/* reset the NIC */
	ngbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	ngbe_stop_hw(hw);

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	ngbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ngbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	ngbe_set_pcie_master(hw, true);

	adapter->rss_reta_updated = 0;

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ngbe_pf_reset_hw(hw);

	ngbe_dev_stop(dev);

	ngbe_dev_free_queues(dev);

	ngbe_set_pcie_master(hw, false);

	/* reprogram the RAR[0] in case user changed it. */
	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
				ngbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + NGBE_LINK_UP_TIME));

	/* uninitialize PF if max_vfs not zero */
	ngbe_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begins to reset a PF port, it should notify all
	 * its VFs so that they can align with it. The detailed notification
	 * mechanism is PMD-specific; for the ngbe PF it is rather complex.
	 * To avoid unexpected behavior in the VFs, resetting the PF with
	 * SR-IOV active is currently not supported. It might be supported
	 * later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_ngbe_dev_uninit(dev);
	if (ret != 0)
		return ret;

	ret = eth_ngbe_dev_init(dev, NULL);

	return ret;
}

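/*
 * Queue counters are 32 bits wide (36 bits for byte counts, split across a
 * low and a high register). The macros below detect rollover against the
 * last snapshot and accumulate the delta, masked back to the counter width;
 * until offsets are loaded after a reset they only refresh the snapshot.
 */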
#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
	{                                                       \
		uint32_t current_counter = rd32(hw, reg);       \
		if (current_counter < last_counter)             \
			current_counter += 0x100000000LL;       \
		if (!hw->offset_loaded)                         \
			last_counter = current_counter;         \
		counter = current_counter - last_counter;       \
		counter &= 0xFFFFFFFFLL;                        \
	}

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{                                                                \
		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
		uint64_t current_counter = (current_counter_msb << 32) | \
			current_counter_lsb;                             \
		if (current_counter < last_counter)                      \
			current_counter += 0x1000000000LL;               \
		if (!hw->offset_loaded)                                  \
			last_counter = current_counter;                  \
		counter = current_counter - last_counter;                \
		counter &= 0xFFFFFFFFFLL;                                \
	}

void
ngbe_read_stats_registers(struct ngbe_hw *hw,
			   struct ngbe_hw_stats *hw_stats)
{
	unsigned int i;

	/* QP Stats */
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
			hw->qp_last[i].rx_qp_packets,
			hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
			hw->qp_last[i].rx_qp_bytes,
			hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
			hw->qp_last[i].rx_qp_mc_packets,
			hw_stats->qp[i].rx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
			hw->qp_last[i].rx_qp_bc_packets,
			hw_stats->qp[i].rx_qp_bc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
			hw->qp_last[i].tx_qp_packets,
			hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
			hw->qp_last[i].tx_qp_bytes,
			hw_stats->qp[i].tx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
			hw->qp_last[i].tx_qp_mc_packets,
			hw_stats->qp[i].tx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
			hw->qp_last[i].tx_qp_bc_packets,
			hw_stats->qp[i].tx_qp_bc_packets);
	}

	/* PB Stats */
	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

	/* DMA Stats */
	hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

	/* MAC Stats */
	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
	hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

	/* MNG Stats */
	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);

	/* MACsec Stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, NGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, NGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, NGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, NGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, NGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, NGBE_LSECRX_BADPKT(i));
	}
	for (i = 0; i < 4; i++) {
		hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
		hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
	}
	hw_stats->rx_total_missed_packets =
			hw_stats->rx_up_dropped;
}

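/*
 * Basic stats: the RQSM/TQSM mapping registers hold one 8-bit field per
 * queue (NB_QMAP_FIELDS_PER_QSM_REG fields per register) selecting which
 * of the RTE_ETHDEV_QUEUE_STAT_CNTRS per-queue counters a queue reports to.
 */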
static int
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct ngbe_stat_mappings *stat_mappings =
			NGBE_DEV_STAT_MAPPINGS(dev);
	uint32_t i, j;

	ngbe_read_stats_registers(hw, hw_stats);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
	for (i = 0; i < NGBE_MAX_QP; i++) {
		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
		uint32_t q_map;

		q_map = (stat_mappings->rqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

		q_map = (stat_mappings->tqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
	}

	/* Rx Errors */
	stats->imissed  = hw_stats->rx_total_missed_packets +
			  hw_stats->rx_dma_drop;
	stats->ierrors  = hw_stats->rx_crc_errors +
			  hw_stats->rx_mac_short_packet_dropped +
			  hw_stats->rx_length_errors +
			  hw_stats->rx_undersize_errors +
			  hw_stats->rx_oversize_errors +
			  hw_stats->rx_illegal_byte_errors +
			  hw_stats->rx_error_bytes +
			  hw_stats->rx_fragment_errors;

	/* Tx Errors */
	stats->oerrors  = 0;
	return 0;
}

static int
ngbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_dev_stats_get(dev, NULL);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

/* This function calculates the number of xstats based on the current config */
static unsigned
ngbe_xstats_calc_num(struct rte_eth_dev *dev)
{
	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	return NGBE_NB_HW_STATS +
	       NGBE_NB_QP_STATS * nb_queues;
}

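/*
 * xstat ids are laid out flat: ids in [0, NGBE_NB_HW_STATS) are device-wide
 * stats, followed by NGBE_NB_QP_STATS entries per queue pair. The two
 * helpers below decode an id into a name or a byte offset, respectively.
 */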
static inline int
ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		snprintf(name, size, "[hw]%s",
			rte_ngbe_stats_strings[id].name);
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		snprintf(name, size, "[q%u]%s", nb,
			rte_ngbe_qp_strings[st].name);
		return 0;
	}
	id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;

	return -(int)(id + 1);
}

static inline int
ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		*offset = rte_ngbe_stats_strings[id].offset;
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		*offset = rte_ngbe_qp_strings[st].offset +
			nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
		return 0;
	}

	return -1;
}

static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
{
	unsigned int i, count;

	count = ngbe_xstats_calc_num(dev);
	if (xstats_names == NULL)
		return count;

	/* Note: limit >= cnt_stats checked upstream
	 * in rte_eth_xstats_names()
	 */
	limit = min(limit, count);

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(i, xstats_names[i].name,
			sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
			break;
		}
	}

	return i;
}

static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
	const uint64_t *ids,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int limit)
{
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
				sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
			return -1;
		}
	}

	return i;
}

static int
ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
					 unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, xstats is NULL and we have already cleared
	 * the registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (xstats == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset = 0;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
			break;
		}
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
		xstats[i].id = i;
	}

	return i;
}

static int
ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
					 unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, xstats is NULL and we have already cleared
	 * the registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (values == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}

static int
ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int limit)
{
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_(dev, values, limit);

	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(ids[i], &offset)) {
			PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}

static int
ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

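/*
 * The firmware version is reported as the EEPROM identifier. Following the
 * ethdev convention, when the caller's buffer is too small the required
 * size (including the terminating '\0') is returned instead of 0.
 */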
1731 static int
1732 ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1733 {
1734 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1735 	int ret;
1736 
1737 	ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
1738 
1739 	if (ret < 0)
1740 		return -EINVAL;
1741 
1742 	ret += 1; /* add the size of '\0' */
1743 	if (fw_size < (size_t)ret)
1744 		return ret;
1745 
1746 	return 0;
1747 }
1748 
1749 static int
1750 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1751 {
1752 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1753 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1754 
1755 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1756 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1757 	dev_info->min_rx_bufsize = 1024;
1758 	dev_info->max_rx_pktlen = 15872;
1759 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1760 	dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
1761 	dev_info->max_vfs = pci_dev->max_vfs;
1762 	dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1763 	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1764 				     dev_info->rx_queue_offload_capa);
1765 	dev_info->tx_queue_offload_capa = 0;
1766 	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1767 
1768 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1769 		.rx_thresh = {
1770 			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
1771 			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
1772 			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
1773 		},
1774 		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1775 		.rx_drop_en = 0,
1776 		.offloads = 0,
1777 	};
1778 
1779 	dev_info->default_txconf = (struct rte_eth_txconf) {
1780 		.tx_thresh = {
1781 			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
1782 			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
1783 			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
1784 		},
1785 		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1786 		.offloads = 0,
1787 	};
1788 
1789 	dev_info->rx_desc_lim = rx_desc_lim;
1790 	dev_info->tx_desc_lim = tx_desc_lim;
1791 
1792 	dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
1793 	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
1794 	dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;
1795 
1796 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1797 				RTE_ETH_LINK_SPEED_10M;
1798 
1799 	/* Driver-preferred Rx/Tx parameters */
1800 	dev_info->default_rxportconf.burst_size = 32;
1801 	dev_info->default_txportconf.burst_size = 32;
1802 	dev_info->default_rxportconf.nb_queues = 1;
1803 	dev_info->default_txportconf.nb_queues = 1;
1804 	dev_info->default_rxportconf.ring_size = 256;
1805 	dev_info->default_txportconf.ring_size = 256;
1806 
1807 	return 0;
1808 }
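
/*
 * Illustrative sketch (not part of the driver): reading the limits and
 * driver-preferred defaults reported above. `port_id` is an assumption.
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0)
 *		printf("max rxq %u, preferred ring size %u\n",
 *		       info.max_rx_queues,
 *		       info.default_rxportconf.ring_size);
 */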
1809 
1810 const uint32_t *
1811 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1812 {
1813 	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1814 	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1815 	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1816 	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1817 		return ngbe_get_supported_ptypes();
1818 
1819 	return NULL;
1820 }
1821 
1822 void
1823 ngbe_dev_setup_link_alarm_handler(void *param)
1824 {
1825 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1826 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1827 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1828 	u32 speed;
1829 	bool autoneg = false;
1830 
1831 	speed = hw->phy.autoneg_advertised;
1832 	if (!speed)
1833 		hw->mac.get_link_capabilities(hw, &speed, &autoneg);
1834 
1835 	hw->mac.setup_link(hw, speed, true);
1836 
1837 	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1838 }
1839 
1840 /* Return 0 if the link status changed, -1 if it did not */
1841 int
1842 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1843 			    int wait_to_complete)
1844 {
1845 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1846 	struct rte_eth_link link;
1847 	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1848 	u32 lan_speed = 0;
1849 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1850 	bool link_up;
1851 	int err;
1852 	int wait = 1;
1853 
1854 	memset(&link, 0, sizeof(link));
1855 	link.link_status = RTE_ETH_LINK_DOWN;
1856 	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1857 	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1858 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1859 			~RTE_ETH_LINK_SPEED_AUTONEG);
1860 
1861 	hw->mac.get_link_status = true;
1862 
1863 	if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
1864 		return rte_eth_linkstatus_set(dev, &link);
1865 
1866 	/* Don't wait for completion if polling was not requested or the LSC interrupt is enabled */
1867 	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1868 		wait = 0;
1869 
1870 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1871 	if (err != 0) {
1872 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1873 		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1874 		return rte_eth_linkstatus_set(dev, &link);
1875 	}
1876 
1877 	if (!link_up) {
1878 		if (hw->phy.media_type == ngbe_media_type_fiber &&
1879 			hw->phy.type != ngbe_phy_mvl_sfi) {
1880 			intr->flags |= NGBE_FLAG_NEED_LINK_CONFIG;
1881 			rte_eal_alarm_set(10,
1882 				ngbe_dev_setup_link_alarm_handler, dev);
1883 		}
1884 
1885 		return rte_eth_linkstatus_set(dev, &link);
1886 	}
1887 
1888 	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1889 	link.link_status = RTE_ETH_LINK_UP;
1890 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1891 
1892 	switch (link_speed) {
1893 	default:
1894 	case NGBE_LINK_SPEED_UNKNOWN:
1895 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1896 		break;
1897 
1898 	case NGBE_LINK_SPEED_10M_FULL:
1899 		link.link_speed = RTE_ETH_SPEED_NUM_10M;
1900 		lan_speed = 0;
1901 		break;
1902 
1903 	case NGBE_LINK_SPEED_100M_FULL:
1904 		link.link_speed = RTE_ETH_SPEED_NUM_100M;
1905 		lan_speed = 1;
1906 		break;
1907 
1908 	case NGBE_LINK_SPEED_1GB_FULL:
1909 		link.link_speed = RTE_ETH_SPEED_NUM_1G;
1910 		lan_speed = 2;
1911 		break;
1912 	}
1913 
1914 	if (hw->is_pf) {
1915 		wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1916 		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1917 				NGBE_LINK_SPEED_100M_FULL |
1918 				NGBE_LINK_SPEED_10M_FULL)) {
1919 			wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1920 				NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1921 		}
1922 	}
1923 
1924 	return rte_eth_linkstatus_set(dev, &link);
1925 }
1926 
1927 static int
1928 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1929 {
1930 	return ngbe_dev_link_update_share(dev, wait_to_complete);
1931 }
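
/*
 * Illustrative sketch (not part of the driver): polling the link through
 * the generic API. rte_eth_link_get_nowait() reaches the code above with
 * wait_to_complete == 0, so check_link() does not busy-wait. `port_id`
 * is an assumption.
 *
 *	struct rte_eth_link link;
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
 *	    link.link_status == RTE_ETH_LINK_UP)
 *		printf("link up at %u Mbps\n", link.link_speed);
 */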
1932 
1933 static int
1934 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1935 {
1936 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1937 	uint32_t fctrl;
1938 
1939 	fctrl = rd32(hw, NGBE_PSRCTL);
1940 	fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
1941 	wr32(hw, NGBE_PSRCTL, fctrl);
1942 
1943 	return 0;
1944 }
1945 
1946 static int
1947 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
1948 {
1949 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1950 	uint32_t fctrl;
1951 
1952 	fctrl = rd32(hw, NGBE_PSRCTL);
1953 	fctrl &= (~NGBE_PSRCTL_UCP);
1954 	if (dev->data->all_multicast == 1)
1955 		fctrl |= NGBE_PSRCTL_MCP;
1956 	else
1957 		fctrl &= (~NGBE_PSRCTL_MCP);
1958 	wr32(hw, NGBE_PSRCTL, fctrl);
1959 
1960 	return 0;
1961 }
1962 
1963 static int
1964 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
1965 {
1966 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1967 	uint32_t fctrl;
1968 
1969 	fctrl = rd32(hw, NGBE_PSRCTL);
1970 	fctrl |= NGBE_PSRCTL_MCP;
1971 	wr32(hw, NGBE_PSRCTL, fctrl);
1972 
1973 	return 0;
1974 }
1975 
1976 static int
1977 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
1978 {
1979 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1980 	uint32_t fctrl;
1981 
1982 	if (dev->data->promiscuous == 1)
1983 		return 0; /* must remain in all_multicast mode */
1984 
1985 	fctrl = rd32(hw, NGBE_PSRCTL);
1986 	fctrl &= (~NGBE_PSRCTL_MCP);
1987 	wr32(hw, NGBE_PSRCTL, fctrl);
1988 
1989 	return 0;
1990 }
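
/*
 * Illustrative sketch (not part of the driver): the four PSRCTL filter
 * handlers above back these generic calls. Note the interaction encoded
 * above: disabling promiscuous mode keeps MCP set while all-multicast
 * is on, and disabling all-multicast is a no-op while promiscuous mode
 * is active. `port_id` is an assumption.
 *
 *	rte_eth_promiscuous_enable(port_id);
 *	rte_eth_allmulticast_enable(port_id);
 *	rte_eth_promiscuous_disable(port_id);
 *	rte_eth_allmulticast_disable(port_id);
 */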
1991 
1992 /**
1993  * It enables or disables the link status change (LSC) interrupt.
1994  * It is called only once, during NIC initialization.
1995  *
1996  * @param dev
1997  *  Pointer to struct rte_eth_dev.
1998  * @param on
1999  *  Enable or Disable.
2000  *
2001  * @return
2002  *  - On success, zero.
2003  *  - On failure, a negative value.
2004  */
2005 static int
2006 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2007 {
2008 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2009 
2010 	ngbe_dev_link_status_print(dev);
2011 	if (on != 0) {
2012 		intr->mask_misc |= NGBE_ICRMISC_PHY;
2013 		intr->mask_misc |= NGBE_ICRMISC_GPIO;
2014 	} else {
2015 		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2016 		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
2017 	}
2018 
2019 	return 0;
2020 }
2021 
2022 /**
2023  * It sets up the misc interrupt mask and the GPIO cause.
2024  * It is called only once, during NIC initialization.
2025  *
2026  * @param dev
2027  *  Pointer to struct rte_eth_dev.
2028  *
2029  * @return
2030  *  - On success, zero.
2031  *  - On failure, a negative value.
2032  */
2033 static int
2034 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
2035 {
2036 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2037 	u64 mask;
2038 
2039 	mask = NGBE_ICR_MASK;
2040 	mask &= (1ULL << NGBE_MISC_VEC_ID);
2041 	intr->mask |= mask;
2042 	intr->mask_misc |= NGBE_ICRMISC_GPIO;
2043 
2044 	return 0;
2045 }
2046 
2047 /**
2048  * It enables the Rx queue interrupt vectors in the mask.
2049  * It is called only once, during NIC initialization.
2050  *
2051  * @param dev
2052  *  Pointer to struct rte_eth_dev.
2053  *
2054  * @return
2055  *  - On success, zero.
2056  *  - On failure, a negative value.
2057  */
2058 static int
2059 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2060 {
2061 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2062 	u64 mask;
2063 
2064 	mask = NGBE_ICR_MASK;
2065 	mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
2066 	intr->mask |= mask;
2067 
2068 	return 0;
2069 }
2070 
2071 /**
2072  * It enables the MACsec (link security) interrupt cause.
2073  * It is called only once, during NIC initialization.
2074  *
2075  * @param dev
2076  *  Pointer to struct rte_eth_dev.
2077  *
2078  * @return
2079  *  - On success, zero.
2080  *  - On failure, a negative value.
2081  */
2082 static int
2083 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2084 {
2085 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2086 
2087 	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
2088 
2089 	return 0;
2090 }
2091 
2092 /*
2093  * It reads the interrupt causes and sets flags for link_update.
2094  *
2095  * @param dev
2096  *  Pointer to struct rte_eth_dev.
2097  *
2098  * @return
2099  *  - On success, zero.
2100  *  - On failure, a negative value.
2101  */
2102 static int
2103 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2104 {
2105 	uint32_t eicr;
2106 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2107 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2108 
2109 	/* Read the interrupt cause from the ISB; it is cleared below */
2110 	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2111 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2112 
2113 	intr->flags = 0;
2114 
2115 	/* set flag for async link update */
2116 	if (eicr & NGBE_ICRMISC_PHY)
2117 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2118 
2119 	if (eicr & NGBE_ICRMISC_VFMBX)
2120 		intr->flags |= NGBE_FLAG_MAILBOX;
2121 
2122 	if (eicr & NGBE_ICRMISC_LNKSEC)
2123 		intr->flags |= NGBE_FLAG_MACSEC;
2124 
2125 	if (eicr & NGBE_ICRMISC_GPIO)
2126 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2127 
2128 	((u32 *)hw->isb_mem)[NGBE_ISB_MISC] = 0;
2129 
2130 	return 0;
2131 }
2132 
2133 /**
2134  * It gets and then prints the link status.
2135  *
2136  * @param dev
2137  *  Pointer to struct rte_eth_dev.
2142  */
2143 static void
2144 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
2145 {
2146 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2147 	struct rte_eth_link link;
2148 
2149 	rte_eth_linkstatus_get(dev, &link);
2150 
2151 	if (link.link_status == RTE_ETH_LINK_UP) {
2152 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2153 					(int)(dev->data->port_id),
2154 					(unsigned int)link.link_speed,
2155 			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2156 					"full-duplex" : "half-duplex");
2157 	} else {
2158 		PMD_INIT_LOG(INFO, "Port %d: Link Down",
2159 				(int)(dev->data->port_id));
2160 	}
2161 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2162 				pci_dev->addr.domain,
2163 				pci_dev->addr.bus,
2164 				pci_dev->addr.devid,
2165 				pci_dev->addr.function);
2166 }
2167 
2168 /*
2169  * It executes link_update after an interrupt has occurred.
2170  *
2171  * @param dev
2172  *  Pointer to struct rte_eth_dev.
2173  *
2174  * @return
2175  *  - On success, zero.
2176  *  - On failure, a negative value.
2177  */
2178 static int
2179 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2180 {
2181 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2182 
2183 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2184 
2185 	if (intr->flags & NGBE_FLAG_MAILBOX) {
2186 		ngbe_pf_mbx_process(dev);
2187 		intr->flags &= ~NGBE_FLAG_MAILBOX;
2188 	}
2189 
2190 	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2191 		struct rte_eth_link link;
2192 
2193 		/* Get the link status before the update, to compare afterwards */
2194 		rte_eth_linkstatus_get(dev, &link);
2195 
2196 		ngbe_dev_link_update(dev, 0);
2197 		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2198 		ngbe_dev_link_status_print(dev);
2199 		if (dev->data->dev_link.link_speed != link.link_speed)
2200 			rte_eth_dev_callback_process(dev,
2201 				RTE_ETH_EVENT_INTR_LSC, NULL);
2202 	}
2203 
2204 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
2205 	ngbe_enable_intr(dev);
2206 
2207 	return 0;
2208 }
2209 
2210 /**
2211  * Interrupt handler triggered by the NIC for handling
2212  * a specific interrupt.
2213  *
2214  * @param param
2215  *  The address of parameter (struct rte_eth_dev *) registered before.
2216  */
2217 static void
2218 ngbe_dev_interrupt_handler(void *param)
2219 {
2220 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2221 
2222 	ngbe_dev_interrupt_get_status(dev);
2223 	ngbe_dev_interrupt_action(dev);
2224 }
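
/*
 * Illustrative sketch (not part of the driver): how an application
 * receives the RTE_ETH_EVENT_INTR_LSC event that
 * ngbe_dev_interrupt_action() raises above. The callback name is an
 * assumption; the application must also set dev_conf.intr_conf.lsc = 1
 * before rte_eth_dev_configure() so that ngbe_dev_lsc_interrupt_setup()
 * unmasks the PHY/GPIO causes.
 *
 *	static int
 *	lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *		     void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		if (event == RTE_ETH_EVENT_INTR_LSC)
 *			printf("port %u: link state changed\n", port_id);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 */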
2225 
2226 static int
2227 ngbe_dev_led_on(struct rte_eth_dev *dev)
2228 {
2229 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2230 	return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP;
2231 }
2232 
2233 static int
2234 ngbe_dev_led_off(struct rte_eth_dev *dev)
2235 {
2236 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2237 	return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP;
2238 }
2239 
2240 static int
2241 ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2242 {
2243 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2244 	uint32_t mflcn_reg;
2245 	uint32_t fccfg_reg;
2246 	int rx_pause;
2247 	int tx_pause;
2248 
2249 	fc_conf->pause_time = hw->fc.pause_time;
2250 	fc_conf->high_water = hw->fc.high_water;
2251 	fc_conf->low_water = hw->fc.low_water;
2252 	fc_conf->send_xon = hw->fc.send_xon;
2253 	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2254 
2255 	/*
2256 	 * Return rx_pause status according to actual setting of
2257 	 * RXFCCFG register.
2258 	 */
2259 	mflcn_reg = rd32(hw, NGBE_RXFCCFG);
2260 	if (mflcn_reg & NGBE_RXFCCFG_FC)
2261 		rx_pause = 1;
2262 	else
2263 		rx_pause = 0;
2264 
2265 	/*
2266 	 * Return tx_pause status according to actual setting of
2267 	 * TXFCCFG register.
2268 	 */
2269 	fccfg_reg = rd32(hw, NGBE_TXFCCFG);
2270 	if (fccfg_reg & NGBE_TXFCCFG_FC)
2271 		tx_pause = 1;
2272 	else
2273 		tx_pause = 0;
2274 
2275 	if (rx_pause && tx_pause)
2276 		fc_conf->mode = RTE_ETH_FC_FULL;
2277 	else if (rx_pause)
2278 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2279 	else if (tx_pause)
2280 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2281 	else
2282 		fc_conf->mode = RTE_ETH_FC_NONE;
2283 
2284 	return 0;
2285 }
2286 
2287 static int
2288 ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2289 {
2290 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2291 	int err;
2292 	uint32_t rx_buf_size;
2293 	uint32_t max_high_water;
2294 	enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = {
2295 		ngbe_fc_none,
2296 		ngbe_fc_rx_pause,
2297 		ngbe_fc_tx_pause,
2298 		ngbe_fc_full
2299 	};
2300 
2301 	PMD_INIT_FUNC_TRACE();
2302 
2303 	rx_buf_size = rd32(hw, NGBE_PBRXSIZE);
2304 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2305 
2306 	/*
2307 	 * Reserve at least one Ethernet frame for the watermark;
2308 	 * high_water/low_water are in kilobytes for ngbe.
2309 	 */
2310 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
2311 	if (fc_conf->high_water > max_high_water ||
2312 	    fc_conf->high_water < fc_conf->low_water) {
2313 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2314 		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
2315 		return -EINVAL;
2316 	}
2317 
2318 	hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode];
2319 	hw->fc.pause_time     = fc_conf->pause_time;
2320 	hw->fc.high_water     = fc_conf->high_water;
2321 	hw->fc.low_water      = fc_conf->low_water;
2322 	hw->fc.send_xon       = fc_conf->send_xon;
2323 	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
2324 
2325 	err = hw->mac.fc_enable(hw);
2326 
2327 	/* Not negotiated is not an error case */
2328 	if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) {
2329 		wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK,
2330 		      (fc_conf->mac_ctrl_frame_fwd
2331 		       ? NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP));
2332 		ngbe_flush(hw);
2333 
2334 		return 0;
2335 	}
2336 
2337 	PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err);
2338 	return -EIO;
2339 }
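
/*
 * Illustrative sketch (not part of the driver): requesting full flow
 * control through the path above. The watermark values are assumptions
 * and must respect high_water <= max_high_water (derived from
 * NGBE_PBRXSIZE above) and low_water <= high_water, both in KB.
 *
 *	struct rte_eth_fc_conf fc = {
 *		.mode = RTE_ETH_FC_FULL,
 *		.high_water = 0x20,
 *		.low_water = 0x10,
 *		.pause_time = 0x680,
 *		.send_xon = 1,
 *		.autoneg = 1,
 *	};
 *
 *	if (rte_eth_dev_flow_ctrl_set(port_id, &fc) != 0)
 *		printf("flow control setup failed\n");
 */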
2340 
2341 int
2342 ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2343 			  struct rte_eth_rss_reta_entry64 *reta_conf,
2344 			  uint16_t reta_size)
2345 {
2346 	uint8_t i, j, mask;
2347 	uint32_t reta;
2348 	uint16_t idx, shift;
2349 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2350 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2351 
2352 	PMD_INIT_FUNC_TRACE();
2353 
2354 	if (!hw->is_pf) {
2355 		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
2356 			"NIC.");
2357 		return -ENOTSUP;
2358 	}
2359 
2360 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2361 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2362 			"(%d) doesn't match the number hardware can support "
2363 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2364 		return -EINVAL;
2365 	}
2366 
2367 	for (i = 0; i < reta_size; i += 4) {
2368 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2369 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2370 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2371 		if (!mask)
2372 			continue;
2373 
2374 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2375 		for (j = 0; j < 4; j++) {
2376 			if (RS8(mask, j, 0x1)) {
2377 				reta  &= ~(MS32(8 * j, 0xFF));
2378 				reta |= LS32(reta_conf[idx].reta[shift + j],
2379 						8 * j, 0xFF);
2380 			}
2381 		}
2382 		wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2383 	}
2384 	adapter->rss_reta_updated = 1;
2385 
2386 	return 0;
2387 }
2388 
2389 int
2390 ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2391 			 struct rte_eth_rss_reta_entry64 *reta_conf,
2392 			 uint16_t reta_size)
2393 {
2394 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2395 	uint8_t i, j, mask;
2396 	uint32_t reta;
2397 	uint16_t idx, shift;
2398 
2399 	PMD_INIT_FUNC_TRACE();
2400 
2401 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2402 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2403 			"(%d) doesn't match the number hardware can support "
2404 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2405 		return -EINVAL;
2406 	}
2407 
2408 	for (i = 0; i < reta_size; i += 4) {
2409 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2410 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2411 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2412 		if (!mask)
2413 			continue;
2414 
2415 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2416 		for (j = 0; j < 4; j++) {
2417 			if (RS8(mask, j, 0x1))
2418 				reta_conf[idx].reta[shift + j] =
2419 					(uint16_t)RS32(reta, 8 * j, 0xFF);
2420 		}
2421 	}
2422 
2423 	return 0;
2424 }
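
/*
 * Illustrative sketch (not part of the driver): spreading the 128 RETA
 * entries handled above across two queues. Entries are grouped in
 * blocks of RTE_ETH_RETA_GROUP_SIZE (64); a mask bit must be set for
 * every entry that should be written. `port_id` is an assumption.
 *
 *	struct rte_eth_rss_reta_entry64 reta[2];
 *	int i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < 128; i++) {
 *		reta[i / 64].mask |= 1ULL << (i % 64);
 *		reta[i / 64].reta[i % 64] = i % 2;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, 128);
 */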
2425 
2426 static int
2427 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2428 				uint32_t index, uint32_t pool)
2429 {
2430 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2431 	uint32_t enable_addr = 1;
2432 
2433 	return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2434 			     pool, enable_addr);
2435 }
2436 
2437 static void
2438 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2439 {
2440 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2441 
2442 	ngbe_clear_rar(hw, index);
2443 }
2444 
2445 static int
2446 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2447 {
2448 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2449 
2450 	ngbe_remove_rar(dev, 0);
2451 	ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2452 
2453 	return 0;
2454 }
2455 
2456 static int
2457 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2458 {
2459 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2460 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
2461 	struct rte_eth_dev_data *dev_data = dev->data;
2462 
2463 	/* If the device is started, refuse an MTU that requires scattered
2464 	 * Rx support when that feature has not been enabled yet.
2465 	 */
2466 	if (dev_data->dev_started && !dev_data->scattered_rx &&
2467 	    (frame_size + 2 * RTE_VLAN_HLEN >
2468 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2469 		PMD_INIT_LOG(ERR, "Stop port first.");
2470 		return -EINVAL;
2471 	}
2472 
2473 	if (hw->mode)
2474 		wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2475 			NGBE_FRAME_SIZE_MAX);
2476 	else
2477 		wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2478 			NGBE_FRMSZ_MAX(frame_size));
2479 
2480 	return 0;
2481 }
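
/*
 * Illustrative sketch (not part of the driver): the register value
 * programmed above is derived from the MTU as
 * mtu + RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) + 4, so the
 * standard 1500-byte MTU yields a 1522-byte frame limit. `port_id` is
 * an assumption; the error path matches the scattered-Rx check above.
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *		printf("stop the port before enabling scattered Rx\n");
 */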
2482 
2483 static uint32_t
2484 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2485 {
2486 	uint32_t vector = 0;
2487 
2488 	switch (hw->mac.mc_filter_type) {
2489 	case 0:   /* use bits [47:36] of the address */
2490 		vector = ((uc_addr->addr_bytes[4] >> 4) |
2491 			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
2492 		break;
2493 	case 1:   /* use bits [46:35] of the address */
2494 		vector = ((uc_addr->addr_bytes[4] >> 3) |
2495 			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
2496 		break;
2497 	case 2:   /* use bits [45:34] of the address */
2498 		vector = ((uc_addr->addr_bytes[4] >> 2) |
2499 			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
2500 		break;
2501 	case 3:   /* use bits [43:32] of the address */
2502 		vector = ((uc_addr->addr_bytes[4]) |
2503 			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
2504 		break;
2505 	default:  /* Invalid mc_filter_type */
2506 		break;
2507 	}
2508 
2509 	/* The vector can only be 12 bits wide, or the table boundary will be exceeded */
2510 	vector &= 0xFFF;
2511 	return vector;
2512 }
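
/*
 * Worked example (illustrative): with mc_filter_type == 0 the hash uses
 * address bits [47:36]. For an address with addr_bytes[4] = 0xAB and
 * addr_bytes[5] = 0xCD:
 *
 *	(0xAB >> 4) | ((uint16_t)0xCD << 4) = 0x0A | 0xCD0 = 0xCDA
 *
 * so the 12-bit vector is 0xCDA; ngbe_uc_hash_table_set() below then
 * selects UTA word (0xCDA >> 5) & 0x7F = 0x66 and bit 0xCDA & 0x1F = 0x1A.
 */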
2513 
2514 static int
2515 ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2516 			struct rte_ether_addr *mac_addr, uint8_t on)
2517 {
2518 	uint32_t vector;
2519 	uint32_t uta_idx;
2520 	uint32_t reg_val;
2521 	uint32_t uta_mask;
2522 	uint32_t psrctl;
2523 
2524 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2525 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2526 
2527 	vector = ngbe_uta_vector(hw, mac_addr);
2528 	uta_idx = (vector >> 5) & 0x7F;
2529 	uta_mask = 0x1UL << (vector & 0x1F);
2530 
2531 	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2532 		return 0;
2533 
2534 	reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2535 	if (on) {
2536 		uta_info->uta_in_use++;
2537 		reg_val |= uta_mask;
2538 		uta_info->uta_shadow[uta_idx] |= uta_mask;
2539 	} else {
2540 		uta_info->uta_in_use--;
2541 		reg_val &= ~uta_mask;
2542 		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2543 	}
2544 
2545 	wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2546 
2547 	psrctl = rd32(hw, NGBE_PSRCTL);
2548 	if (uta_info->uta_in_use > 0)
2549 		psrctl |= NGBE_PSRCTL_UCHFENA;
2550 	else
2551 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2552 
2553 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2554 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2555 	wr32(hw, NGBE_PSRCTL, psrctl);
2556 
2557 	return 0;
2558 }
2559 
2560 static int
2561 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2562 {
2563 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2564 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2565 	uint32_t psrctl;
2566 	int i;
2567 
2568 	if (on) {
2569 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2570 			uta_info->uta_shadow[i] = ~0;
2571 			wr32(hw, NGBE_UCADDRTBL(i), ~0);
2572 		}
2573 	} else {
2574 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2575 			uta_info->uta_shadow[i] = 0;
2576 			wr32(hw, NGBE_UCADDRTBL(i), 0);
2577 		}
2578 	}
2579 
2580 	psrctl = rd32(hw, NGBE_PSRCTL);
2581 	if (on)
2582 		psrctl |= NGBE_PSRCTL_UCHFENA;
2583 	else
2584 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2585 
2586 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2587 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2588 	wr32(hw, NGBE_PSRCTL, psrctl);
2589 
2590 	return 0;
2591 }
2592 
2593 /**
2594  * Set the IVAR registers, mapping interrupt causes to vectors
2595  * @param hw
2596  *  pointer to ngbe_hw struct
2597  * @param direction
2598  *  0 for Rx, 1 for Tx, -1 for other causes
2599  * @param queue
2600  *  queue to map the corresponding interrupt to
2601  * @param msix_vector
2602  *  the vector to map to the corresponding queue
2603  */
2604 void
2605 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2606 		   uint8_t queue, uint8_t msix_vector)
2607 {
2608 	uint32_t tmp, idx;
2609 
2610 	if (direction == -1) {
2611 		/* other causes */
2612 		msix_vector |= NGBE_IVARMISC_VLD;
2613 		idx = 0;
2614 		tmp = rd32(hw, NGBE_IVARMISC);
2615 		tmp &= ~(0xFF << idx);
2616 		tmp |= (msix_vector << idx);
2617 		wr32(hw, NGBE_IVARMISC, tmp);
2618 	} else {
2619 		/* rx or tx causes */
2620 		/* Workaround for ICR lost */
2621 		idx = ((16 * (queue & 1)) + (8 * direction));
2622 		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2623 		tmp &= ~(0xFF << idx);
2624 		tmp |= (msix_vector << idx);
2625 		wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2626 	}
2627 }
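
/*
 * Worked example (illustrative): for the Rx cause (direction = 0) of
 * queue 3, idx = 16 * (3 & 1) + 8 * 0 = 16, so the vector is written to
 * bits [23:16] of NGBE_IVAR(3 >> 1) = NGBE_IVAR(1); the Tx cause of the
 * same queue uses idx = 16 + 8 = 24, i.e. bits [31:24].
 */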
2628 
2629 /**
2630  * Sets up the hardware to properly generate MSI-X interrupts
2631  * @param dev
2632  *  Pointer to struct rte_eth_dev.
2633  */
2634 static void
2635 ngbe_configure_msix(struct rte_eth_dev *dev)
2636 {
2637 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2638 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2639 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2640 	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2641 	uint32_t vec = NGBE_MISC_VEC_ID;
2642 	uint32_t gpie;
2643 
2644 	/*
2645 	 * Don't configure the MSI-X registers if no mapping has been done
2646 	 * between interrupt vectors and event fds. But if MSI-X has
2647 	 * already been enabled, auto clean, auto mask and throttling
2648 	 * still need to be configured.
2649 	 */
2650 	gpie = rd32(hw, NGBE_GPIE);
2651 	if (!rte_intr_dp_is_en(intr_handle) &&
2652 	    !(gpie & NGBE_GPIE_MSIX))
2653 		return;
2654 
2655 	if (rte_intr_allow_others(intr_handle)) {
2656 		base = NGBE_RX_VEC_START;
2657 		vec = base;
2658 	}
2659 
2660 	/* setup GPIE for MSI-X mode */
2661 	gpie = rd32(hw, NGBE_GPIE);
2662 	gpie |= NGBE_GPIE_MSIX;
2663 	wr32(hw, NGBE_GPIE, gpie);
2664 
2665 	/* Populate the IVAR table and set the ITR values to the
2666 	 * corresponding register.
2667 	 */
2668 	if (rte_intr_dp_is_en(intr_handle)) {
2669 		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2670 			queue_id++) {
2671 			/* by default, 1:1 mapping */
2672 			ngbe_set_ivar_map(hw, 0, queue_id, vec);
2673 			rte_intr_vec_list_index_set(intr_handle,
2674 							   queue_id, vec);
2675 			if (vec < base + rte_intr_nb_efd_get(intr_handle)
2676 			    - 1)
2677 				vec++;
2678 		}
2679 
2680 		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2681 	}
2682 	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2683 			NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2684 			| NGBE_ITR_WRDSA);
2685 }
2686 
2687 static u8 *
2688 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2689 			u8 **mc_addr_ptr, u32 *vmdq)
2690 {
2691 	u8 *mc_addr;
2692 
2693 	*vmdq = 0;
2694 	mc_addr = *mc_addr_ptr;
2695 	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2696 	return mc_addr;
2697 }
2698 
2699 int
2700 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2701 			  struct rte_ether_addr *mc_addr_set,
2702 			  uint32_t nb_mc_addr)
2703 {
2704 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2705 	u8 *mc_addr_list;
2706 
2707 	mc_addr_list = (u8 *)mc_addr_set;
2708 	return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2709 					 ngbe_dev_addr_list_itr, TRUE);
2710 }
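
/*
 * Illustrative sketch (not part of the driver): installing a multicast
 * filter list through the iterator-based path above. The two group
 * addresses and `port_id` are assumptions.
 *
 *	struct rte_ether_addr mc[2] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
 *	};
 *
 *	rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
 */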
2711 
2712 static uint64_t
2713 ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
2714 {
2715 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2716 	uint64_t systime_cycles;
2717 
2718 	systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL);
2719 	systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32;
2720 
2721 	return systime_cycles;
2722 }
2723 
2724 static uint64_t
2725 ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2726 {
2727 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2728 	uint64_t rx_tstamp_cycles;
2729 
2730 	/* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
2731 	rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL);
2732 	rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32;
2733 
2734 	return rx_tstamp_cycles;
2735 }
2736 
2737 static uint64_t
2738 ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2739 {
2740 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2741 	uint64_t tx_tstamp_cycles;
2742 
2743 	/* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
2744 	tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL);
2745 	tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32;
2746 
2747 	return tx_tstamp_cycles;
2748 }
2749 
2750 static void
2751 ngbe_start_timecounters(struct rte_eth_dev *dev)
2752 {
2753 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2754 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2755 	uint32_t incval = 0;
2756 	uint32_t shift = 0;
2757 
2758 	incval = NGBE_INCVAL_1GB;
2759 	shift = NGBE_INCVAL_SHIFT_1GB;
2760 
2761 	wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval));
2762 
2763 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
2764 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2765 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2766 
2767 	adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2768 	adapter->systime_tc.cc_shift = shift;
2769 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
2770 
2771 	adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2772 	adapter->rx_tstamp_tc.cc_shift = shift;
2773 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2774 
2775 	adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2776 	adapter->tx_tstamp_tc.cc_shift = shift;
2777 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2778 }
2779 
2780 static int
2781 ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
2782 {
2783 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2784 
2785 	adapter->systime_tc.nsec += delta;
2786 	adapter->rx_tstamp_tc.nsec += delta;
2787 	adapter->tx_tstamp_tc.nsec += delta;
2788 
2789 	return 0;
2790 }
2791 
2792 static int
2793 ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
2794 {
2795 	uint64_t ns;
2796 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2797 
2798 	ns = rte_timespec_to_ns(ts);
2799 	/* Set the timecounters to a new value. */
2800 	adapter->systime_tc.nsec = ns;
2801 	adapter->rx_tstamp_tc.nsec = ns;
2802 	adapter->tx_tstamp_tc.nsec = ns;
2803 
2804 	return 0;
2805 }
2806 
2807 static int
2808 ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
2809 {
2810 	uint64_t ns, systime_cycles;
2811 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2812 
2813 	systime_cycles = ngbe_read_systime_cyclecounter(dev);
2814 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
2815 	*ts = rte_ns_to_timespec(ns);
2816 
2817 	return 0;
2818 }
2819 
2820 static int
2821 ngbe_timesync_enable(struct rte_eth_dev *dev)
2822 {
2823 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2824 	uint32_t tsync_ctl;
2825 
2826 	/* Stop the timesync system time. */
2827 	wr32(hw, NGBE_TSTIMEINC, 0x0);
2828 	/* Reset the timesync system time value. */
2829 	wr32(hw, NGBE_TSTIMEL, 0x0);
2830 	wr32(hw, NGBE_TSTIMEH, 0x0);
2831 
2832 	ngbe_start_timecounters(dev);
2833 
2834 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2835 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588),
2836 		RTE_ETHER_TYPE_1588 | NGBE_ETFLT_ENA | NGBE_ETFLT_1588);
2837 
2838 	/* Enable timestamping of received PTP packets. */
2839 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2840 	tsync_ctl |= NGBE_TSRXCTL_ENA;
2841 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2842 
2843 	/* Enable timestamping of transmitted PTP packets. */
2844 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2845 	tsync_ctl |= NGBE_TSTXCTL_ENA;
2846 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2847 
2848 	ngbe_flush(hw);
2849 
2850 	return 0;
2851 }
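
/*
 * Illustrative sketch (not part of the driver): typical IEEE 1588 usage
 * of the timesync path above: enable timestamping, read the running
 * time, and slew the clock by a nanosecond delta. `port_id` and the
 * 1000 ns adjustment are assumptions.
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	rte_eth_timesync_read_time(port_id, &ts);
 *	rte_eth_timesync_adjust_time(port_id, 1000);
 */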
2852 
2853 static int
2854 ngbe_timesync_disable(struct rte_eth_dev *dev)
2855 {
2856 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2857 	uint32_t tsync_ctl;
2858 
2859 	/* Disable timestamping of transmitted PTP packets. */
2860 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2861 	tsync_ctl &= ~NGBE_TSTXCTL_ENA;
2862 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2863 
2864 	/* Disable timestamping of received PTP packets. */
2865 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2866 	tsync_ctl &= ~NGBE_TSRXCTL_ENA;
2867 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2868 
2869 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2870 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);
2871 
2872 	/* Stop incrementing the System Time registers. */
2873 	wr32(hw, NGBE_TSTIMEINC, 0);
2874 
2875 	return 0;
2876 }
2877 
2878 static int
2879 ngbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
2880 				 struct timespec *timestamp,
2881 				 uint32_t flags __rte_unused)
2882 {
2883 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2884 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2885 	uint32_t tsync_rxctl;
2886 	uint64_t rx_tstamp_cycles;
2887 	uint64_t ns;
2888 
2889 	tsync_rxctl = rd32(hw, NGBE_TSRXCTL);
2890 	if ((tsync_rxctl & NGBE_TSRXCTL_VLD) == 0)
2891 		return -EINVAL;
2892 
2893 	rx_tstamp_cycles = ngbe_read_rx_tstamp_cyclecounter(dev);
2894 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
2895 	*timestamp = rte_ns_to_timespec(ns);
2896 
2897 	return 0;
2898 }
2899 
2900 static int
2901 ngbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
2902 				 struct timespec *timestamp)
2903 {
2904 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2905 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2906 	uint32_t tsync_txctl;
2907 	uint64_t tx_tstamp_cycles;
2908 	uint64_t ns;
2909 
2910 	tsync_txctl = rd32(hw, NGBE_TSTXCTL);
2911 	if ((tsync_txctl & NGBE_TSTXCTL_VLD) == 0)
2912 		return -EINVAL;
2913 
2914 	tx_tstamp_cycles = ngbe_read_tx_tstamp_cyclecounter(dev);
2915 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
2916 	*timestamp = rte_ns_to_timespec(ns);
2917 
2918 	return 0;
2919 }
2920 
2921 static int
2922 ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
2923 {
2924 	int count = 0;
2925 	int g_ind = 0;
2926 	const struct reg_info *reg_group;
2927 	const struct reg_info **reg_set = ngbe_regs_others;
2928 
2929 	while ((reg_group = reg_set[g_ind++]))
2930 		count += ngbe_regs_group_count(reg_group);
2931 
2932 	return count;
2933 }
2934 
2935 static int
2936 ngbe_get_regs(struct rte_eth_dev *dev,
2937 	      struct rte_dev_reg_info *regs)
2938 {
2939 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2940 	uint32_t *data = regs->data;
2941 	int g_ind = 0;
2942 	int count = 0;
2943 	const struct reg_info *reg_group;
2944 	const struct reg_info **reg_set = ngbe_regs_others;
2945 
2946 	if (data == NULL) {
2947 		regs->length = ngbe_get_reg_length(dev);
2948 		regs->width = sizeof(uint32_t);
2949 		return 0;
2950 	}
2951 
2952 	/* Support only full register dump */
2953 	if (regs->length == 0 ||
2954 	    regs->length == (uint32_t)ngbe_get_reg_length(dev)) {
2955 		regs->version = hw->mac.type << 24 |
2956 				hw->revision_id << 16 |
2957 				hw->device_id;
2958 		while ((reg_group = reg_set[g_ind++]))
2959 			count += ngbe_read_regs_group(dev, &data[count],
2960 						      reg_group);
2961 		return 0;
2962 	}
2963 
2964 	return -ENOTSUP;
2965 }
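
/*
 * Illustrative sketch (not part of the driver): the two-call pattern the
 * data == NULL branch above supports: query the dump size first, then
 * fetch the registers. Error handling is elided; `port_id` is an
 * assumption.
 *
 *	struct rte_dev_reg_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	rte_eth_dev_get_reg_info(port_id, &info);
 *	info.data = calloc(info.length, info.width);
 *	if (info.data != NULL)
 *		rte_eth_dev_get_reg_info(port_id, &info);
 */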
2966 
2967 static int
2968 ngbe_get_eeprom_length(struct rte_eth_dev *dev)
2969 {
2970 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2971 
2972 	/* Return unit is byte count */
2973 	return hw->rom.word_size * 2;
2974 }
2975 
2976 static int
2977 ngbe_get_eeprom(struct rte_eth_dev *dev,
2978 		struct rte_dev_eeprom_info *in_eeprom)
2979 {
2980 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2981 	struct ngbe_rom_info *eeprom = &hw->rom;
2982 	uint16_t *data = in_eeprom->data;
2983 	int first, length;
2984 
2985 	first = in_eeprom->offset >> 1;
2986 	length = in_eeprom->length >> 1;
2987 	if (first > hw->rom.word_size ||
2988 	    ((first + length) > hw->rom.word_size))
2989 		return -EINVAL;
2990 
2991 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
2992 
2993 	return eeprom->readw_buffer(hw, first, length, data);
2994 }
2995 
2996 static int
2997 ngbe_set_eeprom(struct rte_eth_dev *dev,
2998 		struct rte_dev_eeprom_info *in_eeprom)
2999 {
3000 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3001 	struct ngbe_rom_info *eeprom = &hw->rom;
3002 	uint16_t *data = in_eeprom->data;
3003 	int first, length;
3004 
3005 	first = in_eeprom->offset >> 1;
3006 	length = in_eeprom->length >> 1;
3007 	if (first > hw->rom.word_size ||
3008 	    ((first + length) > hw->rom.word_size))
3009 		return -EINVAL;
3010 
3011 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3012 
3013 	return eeprom->writew_buffer(hw, first, length, data);
3014 }
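
/*
 * Illustrative sketch (not part of the driver): reading the first 16
 * bytes of the EEPROM through the accessors above. Offsets and lengths
 * are byte counts; the driver converts them to 16-bit words (offset and
 * length are shifted right by one). `port_id` is an assumption.
 *
 *	uint16_t buf[8];
 *	struct rte_dev_eeprom_info ee = {
 *		.data = buf,
 *		.offset = 0,
 *		.length = sizeof(buf),
 *	};
 *
 *	if (rte_eth_dev_get_eeprom(port_id, &ee) == 0)
 *		printf("eeprom word 0: 0x%04x\n", buf[0]);
 */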
3015 
3016 static const struct eth_dev_ops ngbe_eth_dev_ops = {
3017 	.dev_configure              = ngbe_dev_configure,
3018 	.dev_infos_get              = ngbe_dev_info_get,
3019 	.dev_start                  = ngbe_dev_start,
3020 	.dev_stop                   = ngbe_dev_stop,
3021 	.dev_close                  = ngbe_dev_close,
3022 	.dev_reset                  = ngbe_dev_reset,
3023 	.promiscuous_enable         = ngbe_dev_promiscuous_enable,
3024 	.promiscuous_disable        = ngbe_dev_promiscuous_disable,
3025 	.allmulticast_enable        = ngbe_dev_allmulticast_enable,
3026 	.allmulticast_disable       = ngbe_dev_allmulticast_disable,
3027 	.link_update                = ngbe_dev_link_update,
3028 	.stats_get                  = ngbe_dev_stats_get,
3029 	.xstats_get                 = ngbe_dev_xstats_get,
3030 	.xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
3031 	.stats_reset                = ngbe_dev_stats_reset,
3032 	.xstats_reset               = ngbe_dev_xstats_reset,
3033 	.xstats_get_names           = ngbe_dev_xstats_get_names,
3034 	.xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
3035 	.fw_version_get             = ngbe_fw_version_get,
3036 	.dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
3037 	.mtu_set                    = ngbe_dev_mtu_set,
3038 	.vlan_filter_set            = ngbe_vlan_filter_set,
3039 	.vlan_tpid_set              = ngbe_vlan_tpid_set,
3040 	.vlan_offload_set           = ngbe_vlan_offload_set,
3041 	.vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
3042 	.rx_queue_start	            = ngbe_dev_rx_queue_start,
3043 	.rx_queue_stop              = ngbe_dev_rx_queue_stop,
3044 	.tx_queue_start	            = ngbe_dev_tx_queue_start,
3045 	.tx_queue_stop              = ngbe_dev_tx_queue_stop,
3046 	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
3047 	.rx_queue_release           = ngbe_dev_rx_queue_release,
3048 	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
3049 	.tx_queue_release           = ngbe_dev_tx_queue_release,
3050 	.dev_led_on                 = ngbe_dev_led_on,
3051 	.dev_led_off                = ngbe_dev_led_off,
3052 	.flow_ctrl_get              = ngbe_flow_ctrl_get,
3053 	.flow_ctrl_set              = ngbe_flow_ctrl_set,
3054 	.mac_addr_add               = ngbe_add_rar,
3055 	.mac_addr_remove            = ngbe_remove_rar,
3056 	.mac_addr_set               = ngbe_set_default_mac_addr,
3057 	.uc_hash_table_set          = ngbe_uc_hash_table_set,
3058 	.uc_all_hash_table_set      = ngbe_uc_all_hash_table_set,
3059 	.reta_update                = ngbe_dev_rss_reta_update,
3060 	.reta_query                 = ngbe_dev_rss_reta_query,
3061 	.rss_hash_update            = ngbe_dev_rss_hash_update,
3062 	.rss_hash_conf_get          = ngbe_dev_rss_hash_conf_get,
3063 	.set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
3064 	.rxq_info_get               = ngbe_rxq_info_get,
3065 	.txq_info_get               = ngbe_txq_info_get,
3066 	.rx_burst_mode_get          = ngbe_rx_burst_mode_get,
3067 	.tx_burst_mode_get          = ngbe_tx_burst_mode_get,
3068 	.timesync_enable            = ngbe_timesync_enable,
3069 	.timesync_disable           = ngbe_timesync_disable,
3070 	.timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp,
3071 	.timesync_read_tx_timestamp = ngbe_timesync_read_tx_timestamp,
3072 	.get_reg                    = ngbe_get_regs,
3073 	.get_eeprom_length          = ngbe_get_eeprom_length,
3074 	.get_eeprom                 = ngbe_get_eeprom,
3075 	.set_eeprom                 = ngbe_set_eeprom,
3076 	.timesync_adjust_time       = ngbe_timesync_adjust_time,
3077 	.timesync_read_time         = ngbe_timesync_read_time,
3078 	.timesync_write_time        = ngbe_timesync_write_time,
3079 	.tx_done_cleanup            = ngbe_dev_tx_done_cleanup,
3080 };
3081 
3082 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
3083 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
3084 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
3085 
3086 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
3087 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
3088 
3089 #ifdef RTE_ETHDEV_DEBUG_RX
3090 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
3091 #endif
3092 #ifdef RTE_ETHDEV_DEBUG_TX
3093 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
3094 #endif
3095