xref: /dpdk/drivers/net/ngbe/ngbe_ethdev.c (revision 448e01f1b5848b20cb0300d339100dd82f4459e9)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3  * Copyright(c) 2010-2017 Intel Corporation
4  */
5 
6 #include <errno.h>
7 #include <rte_common.h>
8 #include <ethdev_pci.h>
9 
10 #include <rte_alarm.h>
11 
12 #include "ngbe_logs.h"
13 #include "ngbe.h"
14 #include "ngbe_ethdev.h"
15 #include "ngbe_rxtx.h"
16 #include "ngbe_regs_group.h"
17 
18 static const struct reg_info ngbe_regs_general[] = {
19 	{NGBE_RST, 1, 1, "NGBE_RST"},
20 	{NGBE_STAT, 1, 1, "NGBE_STAT"},
21 	{NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
22 	{NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
23 	{NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
24 	{NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
25 	{0, 0, 0, ""}
26 };
27 
28 static const struct reg_info ngbe_regs_nvm[] = {
29 	{0, 0, 0, ""}
30 };
31 
32 static const struct reg_info ngbe_regs_interrupt[] = {
33 	{0, 0, 0, ""}
34 };
35 
36 static const struct reg_info ngbe_regs_fctl_others[] = {
37 	{0, 0, 0, ""}
38 };
39 
40 static const struct reg_info ngbe_regs_rxdma[] = {
41 	{0, 0, 0, ""}
42 };
43 
44 static const struct reg_info ngbe_regs_rx[] = {
45 	{0, 0, 0, ""}
46 };
47 
48 static const struct reg_info ngbe_regs_tx[] = {
49 	{0, 0, 0, ""}
50 };
51 
52 static const struct reg_info ngbe_regs_wakeup[] = {
53 	{0, 0, 0, ""}
54 };
55 
56 static const struct reg_info ngbe_regs_mac[] = {
57 	{0, 0, 0, ""}
58 };
59 
60 static const struct reg_info ngbe_regs_diagnostic[] = {
61 	{0, 0, 0, ""},
62 };
63 
64 /* PF registers */
65 static const struct reg_info *ngbe_regs_others[] = {
66 				ngbe_regs_general,
67 				ngbe_regs_nvm,
68 				ngbe_regs_interrupt,
69 				ngbe_regs_fctl_others,
70 				ngbe_regs_rxdma,
71 				ngbe_regs_rx,
72 				ngbe_regs_tx,
73 				ngbe_regs_wakeup,
74 				ngbe_regs_mac,
75 				ngbe_regs_diagnostic,
76 				NULL};
77 
78 static int ngbe_dev_close(struct rte_eth_dev *dev);
79 static int ngbe_dev_link_update(struct rte_eth_dev *dev,
80 				int wait_to_complete);
81 static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
82 static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
83 static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
84 					uint16_t queue);
85 
86 static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
87 static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
88 static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
89 static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
90 static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
91 static void ngbe_dev_interrupt_handler(void *param);
92 static void ngbe_configure_msix(struct rte_eth_dev *dev);
93 
94 #define NGBE_SET_HWSTRIP(h, q) do {\
95 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
96 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
97 		(h)->bitmap[idx] |= 1 << bit;\
98 	} while (0)
99 
100 #define NGBE_CLEAR_HWSTRIP(h, q) do {\
101 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
102 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
103 		(h)->bitmap[idx] &= ~(1 << bit);\
104 	} while (0)
105 
106 #define NGBE_GET_HWSTRIP(h, q, r) do {\
107 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
108 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
109 		(r) = (h)->bitmap[idx] >> bit & 1;\
110 	} while (0)
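/*
 * A worked example of the bitmap arithmetic above, assuming the hwstrip
 * bitmap is made of 32-bit words (sizeof((h)->bitmap[0]) * NBBY == 32):
 * for queue q = 35, idx = 35 / 32 = 1 and bit = 35 % 32 = 3, so
 * NGBE_SET_HWSTRIP sets bit 3 of bitmap[1] and NGBE_GET_HWSTRIP reads it
 * back with (bitmap[1] >> 3) & 1.
 */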
111 
112 /*
113  * The set of PCI devices this driver supports
114  */
115 static const struct rte_pci_id pci_id_ngbe_map[] = {
116 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
117 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
118 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
119 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
120 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
121 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
122 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
123 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
124 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
125 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
126 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
127 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
128 	{ .vendor_id = 0, /* sentinel */ },
129 };
130 
131 static const struct rte_eth_desc_lim rx_desc_lim = {
132 	.nb_max = NGBE_RING_DESC_MAX,
133 	.nb_min = NGBE_RING_DESC_MIN,
134 	.nb_align = NGBE_RXD_ALIGN,
135 };
136 
137 static const struct rte_eth_desc_lim tx_desc_lim = {
138 	.nb_max = NGBE_RING_DESC_MAX,
139 	.nb_min = NGBE_RING_DESC_MIN,
140 	.nb_align = NGBE_TXD_ALIGN,
141 	.nb_seg_max = NGBE_TX_MAX_SEG,
142 	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
143 };
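/*
 * These limits are exported to applications by ngbe_dev_info_get() below
 * (dev_info->rx_desc_lim/tx_desc_lim). A minimal sketch of how a caller
 * might validate a ring size against them (error handling omitted;
 * nb_desc is a hypothetical requested size):
 *
 *	struct rte_eth_dev_info info;
 *	uint16_t nb_desc = 512;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	if (nb_desc > info.rx_desc_lim.nb_max ||
 *	    nb_desc < info.rx_desc_lim.nb_min ||
 *	    nb_desc % info.rx_desc_lim.nb_align != 0)
 *		nb_desc = info.default_rxportconf.ring_size;
 */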
144 
145 static const struct eth_dev_ops ngbe_eth_dev_ops;
146 
147 #define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
148 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
149 static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
150 	/* MNG RxTx */
151 	HW_XSTAT(mng_bmc2host_packets),
152 	HW_XSTAT(mng_host2bmc_packets),
153 	/* Basic RxTx */
154 	HW_XSTAT(rx_packets),
155 	HW_XSTAT(tx_packets),
156 	HW_XSTAT(rx_bytes),
157 	HW_XSTAT(tx_bytes),
158 	HW_XSTAT(rx_total_bytes),
159 	HW_XSTAT(rx_total_packets),
160 	HW_XSTAT(tx_total_packets),
161 	HW_XSTAT(rx_total_missed_packets),
162 	HW_XSTAT(rx_broadcast_packets),
163 	HW_XSTAT(rx_multicast_packets),
164 	HW_XSTAT(rx_management_packets),
165 	HW_XSTAT(tx_management_packets),
166 	HW_XSTAT(rx_management_dropped),
167 	HW_XSTAT(rx_dma_drop),
168 	HW_XSTAT(tx_secdrp_packets),
169 
170 	/* Basic Error */
171 	HW_XSTAT(rx_crc_errors),
172 	HW_XSTAT(rx_illegal_byte_errors),
173 	HW_XSTAT(rx_error_bytes),
174 	HW_XSTAT(rx_mac_short_packet_dropped),
175 	HW_XSTAT(rx_length_errors),
176 	HW_XSTAT(rx_undersize_errors),
177 	HW_XSTAT(rx_fragment_errors),
178 	HW_XSTAT(rx_oversize_errors),
179 	HW_XSTAT(rx_jabber_errors),
180 	HW_XSTAT(rx_l3_l4_xsum_error),
181 	HW_XSTAT(mac_local_errors),
182 	HW_XSTAT(mac_remote_errors),
183 
184 	/* PB Stats */
185 	HW_XSTAT(rx_up_dropped),
186 	HW_XSTAT(rdb_pkt_cnt),
187 	HW_XSTAT(rdb_repli_cnt),
188 	HW_XSTAT(rdb_drp_cnt),
189 
190 	/* MACSEC */
191 	HW_XSTAT(tx_macsec_pkts_untagged),
192 	HW_XSTAT(tx_macsec_pkts_encrypted),
193 	HW_XSTAT(tx_macsec_pkts_protected),
194 	HW_XSTAT(tx_macsec_octets_encrypted),
195 	HW_XSTAT(tx_macsec_octets_protected),
196 	HW_XSTAT(rx_macsec_pkts_untagged),
197 	HW_XSTAT(rx_macsec_pkts_badtag),
198 	HW_XSTAT(rx_macsec_pkts_nosci),
199 	HW_XSTAT(rx_macsec_pkts_unknownsci),
200 	HW_XSTAT(rx_macsec_octets_decrypted),
201 	HW_XSTAT(rx_macsec_octets_validated),
202 	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
203 	HW_XSTAT(rx_macsec_sc_pkts_delayed),
204 	HW_XSTAT(rx_macsec_sc_pkts_late),
205 	HW_XSTAT(rx_macsec_sa_pkts_ok),
206 	HW_XSTAT(rx_macsec_sa_pkts_invalid),
207 	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
208 	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
209 	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
210 
211 	/* MAC RxTx */
212 	HW_XSTAT(rx_size_64_packets),
213 	HW_XSTAT(rx_size_65_to_127_packets),
214 	HW_XSTAT(rx_size_128_to_255_packets),
215 	HW_XSTAT(rx_size_256_to_511_packets),
216 	HW_XSTAT(rx_size_512_to_1023_packets),
217 	HW_XSTAT(rx_size_1024_to_max_packets),
218 	HW_XSTAT(tx_size_64_packets),
219 	HW_XSTAT(tx_size_65_to_127_packets),
220 	HW_XSTAT(tx_size_128_to_255_packets),
221 	HW_XSTAT(tx_size_256_to_511_packets),
222 	HW_XSTAT(tx_size_512_to_1023_packets),
223 	HW_XSTAT(tx_size_1024_to_max_packets),
224 
225 	/* Flow Control */
226 	HW_XSTAT(tx_xon_packets),
227 	HW_XSTAT(rx_xon_packets),
228 	HW_XSTAT(tx_xoff_packets),
229 	HW_XSTAT(rx_xoff_packets),
230 
231 	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
232 	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
233 	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
234 	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
235 };
236 
237 #define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
238 			   sizeof(rte_ngbe_stats_strings[0]))
239 
240 /* Per-queue statistics */
241 #define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
242 static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
243 	QP_XSTAT(rx_qp_packets),
244 	QP_XSTAT(tx_qp_packets),
245 	QP_XSTAT(rx_qp_bytes),
246 	QP_XSTAT(tx_qp_bytes),
247 	QP_XSTAT(rx_qp_mc_packets),
248 };
249 
250 #define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
251 			   sizeof(rte_ngbe_qp_strings[0]))
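/*
 * QP_XSTAT records each counter's offset within qp[0] only; the offset
 * for queue n is computed later in ngbe_get_offset_by_id() by adding a
 * stride of n * NGBE_NB_QP_STATS * sizeof(uint64_t). Note that stride
 * only lines up if each qp[] entry holds exactly NGBE_NB_QP_STATS
 * 64-bit counters.
 */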
252 
253 static inline int32_t
254 ngbe_pf_reset_hw(struct ngbe_hw *hw)
255 {
256 	uint32_t ctrl_ext;
257 	int32_t status;
258 
259 	status = hw->mac.reset_hw(hw);
260 
261 	ctrl_ext = rd32(hw, NGBE_PORTCTL);
262 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
263 	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
264 	wr32(hw, NGBE_PORTCTL, ctrl_ext);
265 	ngbe_flush(hw);
266 
267 	if (status == NGBE_ERR_SFP_NOT_PRESENT)
268 		status = 0;
269 	return status;
270 }
271 
272 static inline void
273 ngbe_enable_intr(struct rte_eth_dev *dev)
274 {
275 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
276 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
277 
278 	wr32(hw, NGBE_IENMISC, intr->mask_misc);
279 	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
280 	ngbe_flush(hw);
281 }
282 
283 static void
284 ngbe_disable_intr(struct ngbe_hw *hw)
285 {
286 	PMD_INIT_FUNC_TRACE();
287 
288 	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
289 	ngbe_flush(hw);
290 }
291 
292 /*
293  * Ensure that all locks are released before first NVM or PHY access
294  */
295 static void
296 ngbe_swfw_lock_reset(struct ngbe_hw *hw)
297 {
298 	uint16_t mask;
299 
300 	/*
301 	 * These are trickier since they are common to all ports; but the
302 	 * swfw_sync retries last long enough (1s) to make it almost certain
303 	 * that if the lock cannot be taken, it is due to an improperly held
304 	 * semaphore.
305 	 */
306 	mask = NGBE_MNGSEM_SWPHY |
307 	       NGBE_MNGSEM_SWMBX |
308 	       NGBE_MNGSEM_SWFLASH;
309 	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
310 		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
311 
312 	hw->mac.release_swfw_sync(hw, mask);
313 }
314 
315 static int
316 eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
317 {
318 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
319 	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
320 	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
321 	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
322 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
323 	const struct rte_memzone *mz;
324 	uint32_t ctrl_ext;
325 	u32 led_conf = 0;
326 	int err, ret;
327 
328 	PMD_INIT_FUNC_TRACE();
329 
330 	eth_dev->dev_ops = &ngbe_eth_dev_ops;
331 	eth_dev->rx_queue_count       = ngbe_dev_rx_queue_count;
332 	eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
333 	eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
334 	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
335 	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
336 	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;
337 
338 	/*
339 	 * For secondary processes, we don't initialise any further as primary
340 	 * has already done this work. Only check we don't need a different
341 	 * Rx and Tx function.
342 	 */
343 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
344 		struct ngbe_tx_queue *txq;
345 		/* The Tx queue function in the primary process is set by the last
346 		 * initialized queue; Tx queues may not have been initialized by the primary process yet.
347 		 */
348 		if (eth_dev->data->tx_queues) {
349 			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
350 			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
351 			ngbe_set_tx_function(eth_dev, txq);
352 		} else {
353 			/* Use default Tx function if we get here */
354 			PMD_INIT_LOG(NOTICE,
355 				"No Tx queues configured yet. Using default Tx function.");
356 		}
357 
358 		ngbe_set_rx_function(eth_dev);
359 
360 		return 0;
361 	}
362 
363 	rte_eth_copy_pci_info(eth_dev, pci_dev);
364 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
365 
366 	/* Vendor and Device ID need to be set before init of shared code */
367 	hw->back = pci_dev;
368 	hw->device_id = pci_dev->id.device_id;
369 	hw->vendor_id = pci_dev->id.vendor_id;
370 	if (pci_dev->id.subsystem_vendor_id == PCI_VENDOR_ID_WANGXUN) {
371 		hw->sub_system_id = pci_dev->id.subsystem_device_id;
372 	} else {
373 		u32 ssid;
374 
375 		ssid = ngbe_flash_read_dword(hw, 0xFFFDC);
376 		if (ssid == 0x1) {
377 			PMD_INIT_LOG(ERR,
378 				"Read of internal subsystem device id failed");
379 			return -ENODEV;
380 		}
381 		hw->sub_system_id = (u16)ssid >> 8 | (u16)ssid << 8;
382 	}
383 	ngbe_map_device_id(hw);
384 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
385 
386 	/* Reserve memory for interrupt status block */
387 	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
388 		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
389 	if (mz == NULL)
390 		return -ENOMEM;
391 
392 	hw->isb_dma = TMZ_PADDR(mz);
393 	hw->isb_mem = TMZ_VADDR(mz);
394 
395 	/* Initialize the shared code (base driver) */
396 	err = ngbe_init_shared_code(hw);
397 	if (err != 0) {
398 		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
399 		return -EIO;
400 	}
401 
402 	/* Unlock any pending hardware semaphore */
403 	ngbe_swfw_lock_reset(hw);
404 
405 	/* Set default Hardware Flow Control settings */
406 	hw->fc.requested_mode = ngbe_fc_full;
407 	hw->fc.current_mode = ngbe_fc_full;
408 	hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
409 	hw->fc.low_water = NGBE_FC_XON_LOTH;
410 	hw->fc.high_water = NGBE_FC_XOFF_HITH;
411 	hw->fc.send_xon = 1;
412 
413 	err = hw->rom.init_params(hw);
414 	if (err != 0) {
415 		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
416 		return -EIO;
417 	}
418 
419 	/* Make sure we have a good EEPROM before we read from it */
420 	err = hw->rom.validate_checksum(hw, NULL);
421 	if (err != 0) {
422 		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
423 		return -EIO;
424 	}
425 
426 	err = hw->phy.led_oem_chk(hw, &led_conf);
427 	if (err == 0)
428 		hw->led_conf = led_conf;
429 	else
430 		hw->led_conf = 0xFFFF;
431 
432 	err = hw->mac.init_hw(hw);
433 	if (err != 0) {
434 		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
435 		return -EIO;
436 	}
437 
438 	/* Reset the hw statistics */
439 	ngbe_dev_stats_reset(eth_dev);
440 
441 	/* disable interrupt */
442 	ngbe_disable_intr(hw);
443 
444 	/* Allocate memory for storing MAC addresses */
445 	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
446 					       hw->mac.num_rar_entries, 0);
447 	if (eth_dev->data->mac_addrs == NULL) {
448 		PMD_INIT_LOG(ERR,
449 			     "Failed to allocate %u bytes needed to store MAC addresses",
450 			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
451 		return -ENOMEM;
452 	}
453 
454 	/* Copy the permanent MAC address */
455 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
456 			&eth_dev->data->mac_addrs[0]);
457 
458 	/* Allocate memory for storing hash filter MAC addresses */
459 	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
460 			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
461 	if (eth_dev->data->hash_mac_addrs == NULL) {
462 		PMD_INIT_LOG(ERR,
463 			     "Failed to allocate %d bytes needed to store MAC addresses",
464 			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
465 		rte_free(eth_dev->data->mac_addrs);
466 		eth_dev->data->mac_addrs = NULL;
467 		return -ENOMEM;
468 	}
469 
470 	/* initialize the vfta */
471 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
472 
473 	/* initialize the HW strip bitmap */
474 	memset(hwstrip, 0, sizeof(*hwstrip));
475 
476 	/* initialize PF if max_vfs not zero */
477 	ret = ngbe_pf_host_init(eth_dev);
478 	if (ret) {
479 		rte_free(eth_dev->data->mac_addrs);
480 		eth_dev->data->mac_addrs = NULL;
481 		rte_free(eth_dev->data->hash_mac_addrs);
482 		eth_dev->data->hash_mac_addrs = NULL;
483 		return ret;
484 	}
485 
486 	ctrl_ext = rd32(hw, NGBE_PORTCTL);
487 	/* let hardware know driver is loaded */
488 	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
489 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
490 	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
491 	wr32(hw, NGBE_PORTCTL, ctrl_ext);
492 	ngbe_flush(hw);
493 
494 	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
495 			(int)hw->mac.type, (int)hw->phy.type);
496 
497 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
498 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
499 		     pci_dev->id.device_id);
500 
501 	rte_intr_callback_register(intr_handle,
502 				   ngbe_dev_interrupt_handler, eth_dev);
503 
504 	/* enable uio/vfio intr/eventfd mapping */
505 	rte_intr_enable(intr_handle);
506 
507 	/* enable support intr */
508 	ngbe_enable_intr(eth_dev);
509 
510 	return 0;
511 }
512 
513 static int
514 eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
515 {
516 	PMD_INIT_FUNC_TRACE();
517 
518 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
519 		return 0;
520 
521 	ngbe_dev_close(eth_dev);
522 
523 	return 0;
524 }
525 
526 static int
527 eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
528 		struct rte_pci_device *pci_dev)
529 {
530 	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
531 			sizeof(struct ngbe_adapter),
532 			eth_dev_pci_specific_init, pci_dev,
533 			eth_ngbe_dev_init, NULL);
534 }
535 
536 static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
537 {
538 	struct rte_eth_dev *ethdev;
539 
540 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
541 	if (ethdev == NULL)
542 		return 0;
543 
544 	return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
545 }
546 
547 static struct rte_pci_driver rte_ngbe_pmd = {
548 	.id_table = pci_id_ngbe_map,
549 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
550 		     RTE_PCI_DRV_INTR_LSC,
551 	.probe = eth_ngbe_pci_probe,
552 	.remove = eth_ngbe_pci_remove,
553 };
554 
555 static int
556 ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
557 {
558 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
559 	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
560 	uint32_t vfta;
561 	uint32_t vid_idx;
562 	uint32_t vid_bit;
563 
564 	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
565 	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
566 	vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
567 	if (on)
568 		vfta |= vid_bit;
569 	else
570 		vfta &= ~vid_bit;
571 	wr32(hw, NGBE_VLANTBL(vid_idx), vfta);
572 
573 	/* update local VFTA copy */
574 	shadow_vfta->vfta[vid_idx] = vfta;
575 
576 	return 0;
577 }
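/*
 * Worked example for the VFTA indexing above: the 4096-entry VLAN filter
 * table lives in 128 32-bit NGBE_VLANTBL registers, so for vlan_id = 100,
 * vid_idx = (100 >> 5) & 0x7F = 3 and vid_bit = 1 << (100 & 0x1F) = 1 << 4,
 * i.e. bit 4 of NGBE_VLANTBL(3) controls filtering of VLAN 100.
 */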
578 
579 static void
580 ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
581 {
582 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
583 	struct ngbe_rx_queue *rxq;
584 	bool restart;
585 	uint32_t rxcfg, rxbal, rxbah;
586 
587 	if (on)
588 		ngbe_vlan_hw_strip_enable(dev, queue);
589 	else
590 		ngbe_vlan_hw_strip_disable(dev, queue);
591 
592 	rxq = dev->data->rx_queues[queue];
593 	rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
594 	rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
595 	rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
596 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
597 		restart = (rxcfg & NGBE_RXCFG_ENA) &&
598 			!(rxcfg & NGBE_RXCFG_VLAN);
599 		rxcfg |= NGBE_RXCFG_VLAN;
600 	} else {
601 		restart = (rxcfg & NGBE_RXCFG_ENA) &&
602 			(rxcfg & NGBE_RXCFG_VLAN);
603 		rxcfg &= ~NGBE_RXCFG_VLAN;
604 	}
605 	rxcfg &= ~NGBE_RXCFG_ENA;
606 
607 	if (restart) {
608 		/* set vlan strip for ring */
609 		ngbe_dev_rx_queue_stop(dev, queue);
610 		wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
611 		wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
612 		wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
613 		ngbe_dev_rx_queue_start(dev, queue);
614 	}
615 }
616 
617 static int
618 ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
619 		    enum rte_vlan_type vlan_type,
620 		    uint16_t tpid)
621 {
622 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
623 	int ret = 0;
624 	uint32_t portctrl, vlan_ext, qinq;
625 
626 	portctrl = rd32(hw, NGBE_PORTCTL);
627 
628 	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
629 	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
630 	switch (vlan_type) {
631 	case RTE_ETH_VLAN_TYPE_INNER:
632 		if (vlan_ext) {
633 			wr32m(hw, NGBE_VLANCTL,
634 				NGBE_VLANCTL_TPID_MASK,
635 				NGBE_VLANCTL_TPID(tpid));
636 			wr32m(hw, NGBE_DMATXCTRL,
637 				NGBE_DMATXCTRL_TPID_MASK,
638 				NGBE_DMATXCTRL_TPID(tpid));
639 		} else {
640 			ret = -ENOTSUP;
641 			PMD_DRV_LOG(ERR,
642 				"Inner type is not supported by single VLAN");
643 		}
644 
645 		if (qinq) {
646 			wr32m(hw, NGBE_TAGTPID(0),
647 				NGBE_TAGTPID_LSB_MASK,
648 				NGBE_TAGTPID_LSB(tpid));
649 		}
650 		break;
651 	case RTE_ETH_VLAN_TYPE_OUTER:
652 			/* Only the high 16 bits are valid */
653 			/* Only the high 16-bits is valid */
654 			wr32m(hw, NGBE_EXTAG,
655 				NGBE_EXTAG_VLAN_MASK,
656 				NGBE_EXTAG_VLAN(tpid));
657 		} else {
658 			wr32m(hw, NGBE_VLANCTL,
659 				NGBE_VLANCTL_TPID_MASK,
660 				NGBE_VLANCTL_TPID(tpid));
661 			wr32m(hw, NGBE_DMATXCTRL,
662 				NGBE_DMATXCTRL_TPID_MASK,
663 				NGBE_DMATXCTRL_TPID(tpid));
664 		}
665 
666 		if (qinq) {
667 			wr32m(hw, NGBE_TAGTPID(0),
668 				NGBE_TAGTPID_MSB_MASK,
669 				NGBE_TAGTPID_MSB(tpid));
670 		}
671 		break;
672 	default:
673 		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
674 		return -EINVAL;
675 	}
676 
677 	return ret;
678 }
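/*
 * This handler backs the vlan_tpid_set ethdev op. A minimal usage sketch
 * from an application, e.g. to select 0x88A8 as the outer (S-tag) TPID:
 *
 *	ret = rte_eth_dev_set_vlan_ether_type(port_id,
 *			RTE_ETH_VLAN_TYPE_OUTER, 0x88A8);
 *
 * As the code above shows, RTE_ETH_VLAN_TYPE_INNER is only accepted once
 * VLAN extend mode is enabled in NGBE_PORTCTL.
 */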
679 
680 void
681 ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
682 {
683 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
684 	uint32_t vlnctrl;
685 
686 	PMD_INIT_FUNC_TRACE();
687 
688 	/* Filter Table Disable */
689 	vlnctrl = rd32(hw, NGBE_VLANCTL);
690 	vlnctrl &= ~NGBE_VLANCTL_VFE;
691 	wr32(hw, NGBE_VLANCTL, vlnctrl);
692 }
693 
694 void
695 ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
696 {
697 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
698 	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
699 	uint32_t vlnctrl;
700 	uint16_t i;
701 
702 	PMD_INIT_FUNC_TRACE();
703 
704 	/* Filter Table Enable */
705 	vlnctrl = rd32(hw, NGBE_VLANCTL);
706 	vlnctrl &= ~NGBE_VLANCTL_CFIENA;
707 	vlnctrl |= NGBE_VLANCTL_VFE;
708 	wr32(hw, NGBE_VLANCTL, vlnctrl);
709 
710 	/* write whatever is in local vfta copy */
711 	for (i = 0; i < NGBE_VFTA_SIZE; i++)
712 		wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
713 }
714 
715 void
716 ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
717 {
718 	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
719 	struct ngbe_rx_queue *rxq;
720 
721 	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
722 		return;
723 
724 	if (on)
725 		NGBE_SET_HWSTRIP(hwstrip, queue);
726 	else
727 		NGBE_CLEAR_HWSTRIP(hwstrip, queue);
728 
729 	if (queue >= dev->data->nb_rx_queues)
730 		return;
731 
732 	rxq = dev->data->rx_queues[queue];
733 
734 	if (on) {
735 		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
736 		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
737 	} else {
738 		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
739 		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
740 	}
741 }
742 
743 static void
744 ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
745 {
746 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
747 	uint32_t ctrl;
748 
749 	PMD_INIT_FUNC_TRACE();
750 
751 	ctrl = rd32(hw, NGBE_RXCFG(queue));
752 	ctrl &= ~NGBE_RXCFG_VLAN;
753 	wr32(hw, NGBE_RXCFG(queue), ctrl);
754 
755 	/* record this setting in the per-queue HW strip bitmap */
756 	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
757 }
758 
759 static void
760 ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
761 {
762 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
763 	uint32_t ctrl;
764 
765 	PMD_INIT_FUNC_TRACE();
766 
767 	ctrl = rd32(hw, NGBE_RXCFG(queue));
768 	ctrl |= NGBE_RXCFG_VLAN;
769 	wr32(hw, NGBE_RXCFG(queue), ctrl);
770 
771 	/* record this setting in the per-queue HW strip bitmap */
772 	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
773 }
774 
775 static void
776 ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
777 {
778 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
779 	uint32_t ctrl;
780 
781 	PMD_INIT_FUNC_TRACE();
782 
783 	ctrl = rd32(hw, NGBE_PORTCTL);
784 	ctrl &= ~NGBE_PORTCTL_VLANEXT;
785 	ctrl &= ~NGBE_PORTCTL_QINQ;
786 	wr32(hw, NGBE_PORTCTL, ctrl);
787 }
788 
789 static void
790 ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
791 {
792 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
793 	uint32_t ctrl;
794 
795 	PMD_INIT_FUNC_TRACE();
796 
797 	ctrl  = rd32(hw, NGBE_PORTCTL);
798 	ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
799 	wr32(hw, NGBE_PORTCTL, ctrl);
800 }
801 
802 static void
803 ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
804 {
805 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
806 	uint32_t ctrl;
807 
808 	PMD_INIT_FUNC_TRACE();
809 
810 	ctrl = rd32(hw, NGBE_PORTCTL);
811 	ctrl &= ~NGBE_PORTCTL_QINQ;
812 	wr32(hw, NGBE_PORTCTL, ctrl);
813 }
814 
815 static void
816 ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
817 {
818 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
819 	uint32_t ctrl;
820 
821 	PMD_INIT_FUNC_TRACE();
822 
823 	ctrl  = rd32(hw, NGBE_PORTCTL);
824 	ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
825 	wr32(hw, NGBE_PORTCTL, ctrl);
826 }
827 
828 void
829 ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
830 {
831 	struct ngbe_rx_queue *rxq;
832 	uint16_t i;
833 
834 	PMD_INIT_FUNC_TRACE();
835 
836 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
837 		rxq = dev->data->rx_queues[i];
838 
839 		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
840 			ngbe_vlan_hw_strip_enable(dev, i);
841 		else
842 			ngbe_vlan_hw_strip_disable(dev, i);
843 	}
844 }
845 
846 void
847 ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
848 {
849 	uint16_t i;
850 	struct rte_eth_rxmode *rxmode;
851 	struct ngbe_rx_queue *rxq;
852 
853 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
854 		rxmode = &dev->data->dev_conf.rxmode;
855 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
856 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
857 				rxq = dev->data->rx_queues[i];
858 				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
859 			}
860 		else
861 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
862 				rxq = dev->data->rx_queues[i];
863 				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
864 			}
865 	}
866 }
867 
868 static int
869 ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
870 {
871 	struct rte_eth_rxmode *rxmode;
872 	rxmode = &dev->data->dev_conf.rxmode;
873 
874 	if (mask & RTE_ETH_VLAN_STRIP_MASK)
875 		ngbe_vlan_hw_strip_config(dev);
876 
877 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
878 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
879 			ngbe_vlan_hw_filter_enable(dev);
880 		else
881 			ngbe_vlan_hw_filter_disable(dev);
882 	}
883 
884 	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
885 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
886 			ngbe_vlan_hw_extend_enable(dev);
887 		else
888 			ngbe_vlan_hw_extend_disable(dev);
889 	}
890 
891 	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
892 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
893 			ngbe_qinq_hw_strip_enable(dev);
894 		else
895 			ngbe_qinq_hw_strip_disable(dev);
896 	}
897 
898 	return 0;
899 }
900 
901 static int
902 ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
903 {
904 	ngbe_config_vlan_strip_on_all_queues(dev, mask);
905 
906 	ngbe_vlan_offload_config(dev, mask);
907 
908 	return 0;
909 }
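/*
 * ngbe_vlan_offload_set() implements the vlan_offload_set ethdev op;
 * ethdev works out which of strip/filter/extend/QinQ changed and passes
 * that set as 'mask'. A minimal application-side sketch:
 *
 *	ret = rte_eth_dev_set_vlan_offload(port_id,
 *			RTE_ETH_VLAN_STRIP_OFFLOAD |
 *			RTE_ETH_VLAN_FILTER_OFFLOAD);
 */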
910 
911 static int
912 ngbe_dev_configure(struct rte_eth_dev *dev)
913 {
914 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
915 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
916 
917 	PMD_INIT_FUNC_TRACE();
918 
919 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
920 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
921 
922 	/* set flag to update link status after init */
923 	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
924 
925 	/*
926 	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
927 	 * allocation preconditions, it will be reset.
928 	 */
929 	adapter->rx_bulk_alloc_allowed = true;
930 
931 	return 0;
932 }
933 
934 static void
935 ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
936 {
937 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
938 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
939 
940 	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
941 	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
942 	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
943 	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
944 		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
945 	else
946 		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));
947 
948 	intr->mask_misc |= NGBE_ICRMISC_GPIO;
949 }
950 
951 /*
952  * Configure device link speed and setup link.
953  * It returns 0 on success.
954  */
955 static int
956 ngbe_dev_start(struct rte_eth_dev *dev)
957 {
958 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
959 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
960 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
961 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
962 	uint32_t intr_vector = 0;
963 	int err;
964 	bool link_up = false, negotiate = false;
965 	uint32_t speed = 0;
966 	uint32_t allowed_speeds = 0;
967 	int mask = 0;
968 	int status;
969 	uint32_t *link_speeds;
970 
971 	PMD_INIT_FUNC_TRACE();
972 
973 	/* Stop the link setup handler before resetting the HW. */
974 	rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev);
975 
976 	/* disable uio/vfio intr/eventfd mapping */
977 	rte_intr_disable(intr_handle);
978 
979 	/* mark the adapter as not stopped */
980 	hw->adapter_stopped = 0;
981 
982 	/* reinitialize adapter, this calls reset and start */
983 	hw->nb_rx_queues = dev->data->nb_rx_queues;
984 	hw->nb_tx_queues = dev->data->nb_tx_queues;
985 	status = ngbe_pf_reset_hw(hw);
986 	if (status != 0)
987 		return -1;
988 	hw->mac.start_hw(hw);
989 	hw->mac.get_link_status = true;
990 
991 	ngbe_set_pcie_master(hw, true);
992 
993 	/* configure PF module if SRIOV enabled */
994 	ngbe_pf_host_configure(dev);
995 
996 	ngbe_dev_phy_intr_setup(dev);
997 
998 	/* check and configure queue intr-vector mapping */
999 	if ((rte_intr_cap_multiple(intr_handle) ||
1000 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
1001 	    dev->data->dev_conf.intr_conf.rxq != 0) {
1002 		intr_vector = dev->data->nb_rx_queues;
1003 		if (rte_intr_efd_enable(intr_handle, intr_vector))
1004 			return -1;
1005 	}
1006 
1007 	if (rte_intr_dp_is_en(intr_handle)) {
1008 		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
1009 						   dev->data->nb_rx_queues)) {
1010 			PMD_INIT_LOG(ERR,
1011 				     "Failed to allocate %d rx_queues intr_vec",
1012 				     dev->data->nb_rx_queues);
1013 			return -ENOMEM;
1014 		}
1015 	}
1016 
1017 	/* configure MSI-X for sleep until Rx interrupt */
1018 	ngbe_configure_msix(dev);
1019 
1020 	/* initialize transmission unit */
1021 	ngbe_dev_tx_init(dev);
1022 
1023 	/* This can fail when allocating mbufs for descriptor rings */
1024 	err = ngbe_dev_rx_init(dev);
1025 	if (err != 0) {
1026 		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
1027 		goto error;
1028 	}
1029 
1030 	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
1031 		RTE_ETH_VLAN_EXTEND_MASK;
1032 	err = ngbe_vlan_offload_config(dev, mask);
1033 	if (err != 0) {
1034 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1035 		goto error;
1036 	}
1037 
1038 	hw->mac.setup_pba(hw);
1039 	ngbe_configure_port(dev);
1040 
1041 	err = ngbe_dev_rxtx_start(dev);
1042 	if (err < 0) {
1043 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1044 		goto error;
1045 	}
1046 
1047 	/* Skip link setup if loopback mode is enabled. */
1048 	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
1049 		goto skip_link_setup;
1050 
1051 	err = hw->mac.check_link(hw, &speed, &link_up, 0);
1052 	if (err != 0)
1053 		goto error;
1054 	dev->data->dev_link.link_status = link_up;
1055 
1056 	link_speeds = &dev->data->dev_conf.link_speeds;
1057 	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
1058 		negotiate = true;
1059 
1060 	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
1061 	if (err != 0)
1062 		goto error;
1063 
1064 	allowed_speeds = 0;
1065 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
1066 		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
1067 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
1068 		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
1069 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
1070 		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;
1071 
1072 	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
1073 		PMD_INIT_LOG(ERR, "Invalid link setting");
1074 		goto error;
1075 	}
1076 
1077 	speed = 0x0;
1078 	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
1079 		speed = hw->mac.default_speeds;
1080 	} else {
1081 		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
1082 			speed |= NGBE_LINK_SPEED_1GB_FULL;
1083 		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
1084 			speed |= NGBE_LINK_SPEED_100M_FULL;
1085 		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
1086 			speed |= NGBE_LINK_SPEED_10M_FULL;
1087 	}
1088 
1089 	err = hw->phy.init_hw(hw);
1090 	if (err != 0) {
1091 		PMD_INIT_LOG(ERR, "PHY init failed");
1092 		goto error;
1093 	}
1094 	err = hw->mac.setup_link(hw, speed, link_up);
1095 	if (err != 0)
1096 		goto error;
1097 
1098 skip_link_setup:
1099 
1100 	if (rte_intr_allow_others(intr_handle)) {
1101 		ngbe_dev_misc_interrupt_setup(dev);
1102 		/* check if lsc interrupt is enabled */
1103 		if (dev->data->dev_conf.intr_conf.lsc != 0)
1104 			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
1105 		else
1106 			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
1107 		ngbe_dev_macsec_interrupt_setup(dev);
1108 		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
1109 	} else {
1110 		rte_intr_callback_unregister(intr_handle,
1111 					     ngbe_dev_interrupt_handler, dev);
1112 		if (dev->data->dev_conf.intr_conf.lsc != 0)
1113 			PMD_INIT_LOG(INFO,
1114 				     "LSC won't be enabled because of no intr multiplex");
1115 	}
1116 
1117 	/* check if rxq interrupt is enabled */
1118 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1119 	    rte_intr_dp_is_en(intr_handle))
1120 		ngbe_dev_rxq_interrupt_setup(dev);
1121 
1122 	/* enable UIO/VFIO intr/eventfd mapping */
1123 	rte_intr_enable(intr_handle);
1124 
1125 	/* re-enable the interrupts that were enabled before the HW reset */
1126 	ngbe_enable_intr(dev);
1127 
1128 	if (hw->gpio_ctl) {
1129 		/* gpio0 is used for power on/off control */
1130 		wr32(hw, NGBE_GPIODATA, 0);
1131 	}
1132 
1133 	/*
1134 	 * Update the link status right before returning, because it may
1135 	 * start the link configuration process in a separate thread.
1136 	 */
1137 	ngbe_dev_link_update(dev, 0);
1138 
1139 	ngbe_read_stats_registers(hw, hw_stats);
1140 	hw->offset_loaded = 1;
1141 
1142 	return 0;
1143 
1144 error:
1145 	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1146 	ngbe_dev_clear_queues(dev);
1147 	return -EIO;
1148 }
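/*
 * ngbe_dev_start() is reached via rte_eth_dev_start(). A minimal sketch
 * of the standard ethdev bring-up sequence that leads here (error
 * handling omitted; mb_pool is an assumed pre-created mbuf pool):
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 256, rte_socket_id(),
 *			NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 256, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 */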
1149 
1150 /*
1151  * Stop device: disable rx and tx functions to allow for reconfiguring.
1152  */
1153 static int
1154 ngbe_dev_stop(struct rte_eth_dev *dev)
1155 {
1156 	struct rte_eth_link link;
1157 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
1158 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1159 	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
1160 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1161 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1162 	int vf;
1163 
1164 	if (hw->adapter_stopped)
1165 		return 0;
1166 
1167 	PMD_INIT_FUNC_TRACE();
1168 
1169 	rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev);
1170 
1171 	if (hw->gpio_ctl) {
1172 		/* gpio0 is used for power on/off control */
1173 		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
1174 	}
1175 
1176 	/* disable interrupts */
1177 	ngbe_disable_intr(hw);
1178 
1179 	/* reset the NIC */
1180 	ngbe_pf_reset_hw(hw);
1181 	hw->adapter_stopped = 0;
1182 
1183 	/* stop adapter */
1184 	ngbe_stop_hw(hw);
1185 
1186 	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
1187 		vfinfo[vf].clear_to_send = false;
1188 
1189 	hw->phy.set_phy_power(hw, false);
1190 
1191 	ngbe_dev_clear_queues(dev);
1192 
1193 	/* Clear stored conf */
1194 	dev->data->scattered_rx = 0;
1195 
1196 	/* Clear recorded link status */
1197 	memset(&link, 0, sizeof(link));
1198 	rte_eth_linkstatus_set(dev, &link);
1199 
1200 	if (!rte_intr_allow_others(intr_handle))
1201 		/* resume to the default handler */
1202 		rte_intr_callback_register(intr_handle,
1203 					   ngbe_dev_interrupt_handler,
1204 					   (void *)dev);
1205 
1206 	/* Clean datapath event and queue/vec mapping */
1207 	rte_intr_efd_disable(intr_handle);
1208 	rte_intr_vec_list_free(intr_handle);
1209 
1210 	ngbe_set_pcie_master(hw, true);
1211 
1212 	adapter->rss_reta_updated = 0;
1213 
1214 	hw->adapter_stopped = true;
1215 	dev->data->dev_started = 0;
1216 
1217 	return 0;
1218 }
1219 
1220 /*
1221  * Reset and stop device.
1222  */
1223 static int
1224 ngbe_dev_close(struct rte_eth_dev *dev)
1225 {
1226 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1227 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1228 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1229 	int retries = 0;
1230 	int ret;
1231 
1232 	PMD_INIT_FUNC_TRACE();
1233 
1234 	ngbe_pf_reset_hw(hw);
1235 
1236 	ngbe_dev_stop(dev);
1237 
1238 	ngbe_dev_free_queues(dev);
1239 
1240 	ngbe_set_pcie_master(hw, false);
1241 
1242 	/* reprogram the RAR[0] in case user changed it. */
1243 	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1244 
1245 	/* Unlock any pending hardware semaphore */
1246 	ngbe_swfw_lock_reset(hw);
1247 
1248 	/* disable uio intr before callback unregister */
1249 	rte_intr_disable(intr_handle);
1250 
1251 	do {
1252 		ret = rte_intr_callback_unregister(intr_handle,
1253 				ngbe_dev_interrupt_handler, dev);
1254 		if (ret >= 0 || ret == -ENOENT) {
1255 			break;
1256 		} else if (ret != -EAGAIN) {
1257 			PMD_INIT_LOG(ERR,
1258 				"intr callback unregister failed: %d",
1259 				ret);
1260 		}
1261 		rte_delay_ms(100);
1262 	} while (retries++ < (10 + NGBE_LINK_UP_TIME));
1263 
1264 	/* uninitialize PF if max_vfs not zero */
1265 	ngbe_pf_host_uninit(dev);
1266 
1267 	rte_free(dev->data->mac_addrs);
1268 	dev->data->mac_addrs = NULL;
1269 
1270 	rte_free(dev->data->hash_mac_addrs);
1271 	dev->data->hash_mac_addrs = NULL;
1272 
1273 	return ret;
1274 }
1275 
1276 /*
1277  * Reset PF device.
1278  */
1279 static int
1280 ngbe_dev_reset(struct rte_eth_dev *dev)
1281 {
1282 	int ret;
1283 
1284 	/* When a DPDK PMD PF begins to reset the PF port, it should notify
1285 	 * all its VFs so that they stay aligned with it. The detailed
1286 	 * notification mechanism is PMD specific; for the ngbe PF it is
1287 	 * rather complex. To avoid unexpected behavior in the VFs, reset of
1288 	 * a PF with SR-IOV activated is currently not supported; it may be added later.
1289 	 */
1290 	if (dev->data->sriov.active)
1291 		return -ENOTSUP;
1292 
1293 	ret = eth_ngbe_dev_uninit(dev);
1294 	if (ret != 0)
1295 		return ret;
1296 
1297 	ret = eth_ngbe_dev_init(dev, NULL);
1298 
1299 	return ret;
1300 }
1301 
1302 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
1303 	{                                                       \
1304 		uint32_t current_counter = rd32(hw, reg);       \
1305 		if (current_counter < last_counter)             \
1306 			current_counter += 0x100000000LL;       \
1307 		if (!hw->offset_loaded)                         \
1308 			last_counter = current_counter;         \
1309 		counter = current_counter - last_counter;       \
1310 		counter &= 0xFFFFFFFFLL;                        \
1311 	}
1312 
1313 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1314 	{                                                                \
1315 		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
1316 		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
1317 		uint64_t current_counter = (current_counter_msb << 32) | \
1318 			current_counter_lsb;                             \
1319 		if (current_counter < last_counter)                      \
1320 			current_counter += 0x1000000000LL;               \
1321 		if (!hw->offset_loaded)                                  \
1322 			last_counter = current_counter;                  \
1323 		counter = current_counter - last_counter;                \
1324 		counter &= 0xFFFFFFFFFLL;                                \
1325 	}
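/*
 * Worked example of the 36-bit wrap handling above: the byte counters
 * are read as a 32-bit LSB register plus an MSB register carrying the
 * top four bits, so they wrap at 2^36 (0x1000000000). If the previous
 * snapshot was last_counter = 0xFFFFFFFF0 and the registers now read
 * 0x10, the raw difference would be negative, so 2^36 is added first:
 * (0x10 + 0x1000000000) - 0xFFFFFFFF0 = 0x20 bytes since the last read.
 * The 32-bit variant gets the same effect from its final 32-bit mask.
 */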
1326 
1327 void
1328 ngbe_read_stats_registers(struct ngbe_hw *hw,
1329 			   struct ngbe_hw_stats *hw_stats)
1330 {
1331 	unsigned int i;
1332 
1333 	/* QP Stats */
1334 	for (i = 0; i < hw->nb_rx_queues; i++) {
1335 		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
1336 			hw->qp_last[i].rx_qp_packets,
1337 			hw_stats->qp[i].rx_qp_packets);
1338 		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
1339 			hw->qp_last[i].rx_qp_bytes,
1340 			hw_stats->qp[i].rx_qp_bytes);
1341 		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
1342 			hw->qp_last[i].rx_qp_mc_packets,
1343 			hw_stats->qp[i].rx_qp_mc_packets);
1344 		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
1345 			hw->qp_last[i].rx_qp_bc_packets,
1346 			hw_stats->qp[i].rx_qp_bc_packets);
1347 	}
1348 
1349 	for (i = 0; i < hw->nb_tx_queues; i++) {
1350 		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
1351 			hw->qp_last[i].tx_qp_packets,
1352 			hw_stats->qp[i].tx_qp_packets);
1353 		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
1354 			hw->qp_last[i].tx_qp_bytes,
1355 			hw_stats->qp[i].tx_qp_bytes);
1356 		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
1357 			hw->qp_last[i].tx_qp_mc_packets,
1358 			hw_stats->qp[i].tx_qp_mc_packets);
1359 		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
1360 			hw->qp_last[i].tx_qp_bc_packets,
1361 			hw_stats->qp[i].tx_qp_bc_packets);
1362 	}
1363 
1364 	/* PB Stats */
1365 	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
1366 	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
1367 	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
1368 	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
1369 	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
1370 	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);
1371 
1372 	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
1373 	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);
1374 
1375 	/* DMA Stats */
1376 	hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
1377 	hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
1378 	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
1379 	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
1380 	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
1381 	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
1382 	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
1383 	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);
1384 
1385 	/* MAC Stats */
1386 	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
1387 	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
1388 	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);
1389 
1390 	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
1391 	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
1392 	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);
1393 
1394 	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
1395 	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);
1396 
1397 	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
1398 	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
1399 	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
1400 	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
1401 	hw_stats->rx_size_512_to_1023_packets +=
1402 			rd64(hw, NGBE_MACRX512TO1023L);
1403 	hw_stats->rx_size_1024_to_max_packets +=
1404 			rd64(hw, NGBE_MACRX1024TOMAXL);
1405 	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
1406 	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
1407 	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
1408 	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
1409 	hw_stats->tx_size_512_to_1023_packets +=
1410 			rd64(hw, NGBE_MACTX512TO1023L);
1411 	hw_stats->tx_size_1024_to_max_packets +=
1412 			rd64(hw, NGBE_MACTX1024TOMAXL);
1413 
1414 	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
1415 	hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
1416 	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);
1417 
1418 	/* MNG Stats */
1419 	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
1420 	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
1421 	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
1422 	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);
1423 
1424 	/* MACsec Stats */
1425 	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
1426 	hw_stats->tx_macsec_pkts_encrypted +=
1427 			rd32(hw, NGBE_LSECTX_ENCPKT);
1428 	hw_stats->tx_macsec_pkts_protected +=
1429 			rd32(hw, NGBE_LSECTX_PROTPKT);
1430 	hw_stats->tx_macsec_octets_encrypted +=
1431 			rd32(hw, NGBE_LSECTX_ENCOCT);
1432 	hw_stats->tx_macsec_octets_protected +=
1433 			rd32(hw, NGBE_LSECTX_PROTOCT);
1434 	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
1435 	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
1436 	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
1437 	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
1438 	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
1439 	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
1440 	hw_stats->rx_macsec_sc_pkts_unchecked +=
1441 			rd32(hw, NGBE_LSECRX_UNCHKPKT);
1442 	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
1443 	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
1444 	for (i = 0; i < 2; i++) {
1445 		hw_stats->rx_macsec_sa_pkts_ok +=
1446 			rd32(hw, NGBE_LSECRX_OKPKT(i));
1447 		hw_stats->rx_macsec_sa_pkts_invalid +=
1448 			rd32(hw, NGBE_LSECRX_INVPKT(i));
1449 		hw_stats->rx_macsec_sa_pkts_notvalid +=
1450 			rd32(hw, NGBE_LSECRX_BADPKT(i));
1451 	}
1452 	for (i = 0; i < 4; i++) {
1453 		hw_stats->rx_macsec_sa_pkts_unusedsa +=
1454 			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
1455 		hw_stats->rx_macsec_sa_pkts_notusingsa +=
1456 			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
1457 	}
1458 	hw_stats->rx_total_missed_packets =
1459 			hw_stats->rx_up_dropped;
1460 }
1461 
1462 static int
1463 ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1464 {
1465 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1466 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1467 	struct ngbe_stat_mappings *stat_mappings =
1468 			NGBE_DEV_STAT_MAPPINGS(dev);
1469 	uint32_t i, j;
1470 
1471 	ngbe_read_stats_registers(hw, hw_stats);
1472 
1473 	if (stats == NULL)
1474 		return -EINVAL;
1475 
1476 	/* Fill out the rte_eth_stats statistics structure */
1477 	stats->ipackets = hw_stats->rx_packets;
1478 	stats->ibytes = hw_stats->rx_bytes;
1479 	stats->opackets = hw_stats->tx_packets;
1480 	stats->obytes = hw_stats->tx_bytes;
1481 
1482 	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
1483 	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
1484 	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
1485 	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
1486 	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
1487 	for (i = 0; i < NGBE_MAX_QP; i++) {
1488 		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
1489 		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
1490 		uint32_t q_map;
1491 
1492 		q_map = (stat_mappings->rqsm[n] >> offset)
1493 				& QMAP_FIELD_RESERVED_BITS_MASK;
1494 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1495 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1496 		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1497 		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1498 
1499 		q_map = (stat_mappings->tqsm[n] >> offset)
1500 				& QMAP_FIELD_RESERVED_BITS_MASK;
1501 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1502 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1503 		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1504 		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
1505 	}
1506 
1507 	/* Rx Errors */
1508 	stats->imissed  = hw_stats->rx_total_missed_packets +
1509 			  hw_stats->rx_dma_drop;
1510 	stats->ierrors  = hw_stats->rx_crc_errors +
1511 			  hw_stats->rx_mac_short_packet_dropped +
1512 			  hw_stats->rx_length_errors +
1513 			  hw_stats->rx_undersize_errors +
1514 			  hw_stats->rx_oversize_errors +
1515 			  hw_stats->rx_illegal_byte_errors +
1516 			  hw_stats->rx_error_bytes +
1517 			  hw_stats->rx_fragment_errors;
1518 
1519 	/* Tx Errors */
1520 	stats->oerrors  = 0;
1521 	return 0;
1522 }
1523 
1524 static int
1525 ngbe_dev_stats_reset(struct rte_eth_dev *dev)
1526 {
1527 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1528 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1529 
1530 	/* HW registers are cleared on read */
1531 	hw->offset_loaded = 0;
1532 	ngbe_dev_stats_get(dev, NULL);
1533 	hw->offset_loaded = 1;
1534 
1535 	/* Reset software totals */
1536 	memset(hw_stats, 0, sizeof(*hw_stats));
1537 
1538 	return 0;
1539 }
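/*
 * The reset works by re-reading the cleared-on-read registers with
 * hw->offset_loaded temporarily cleared, so the hardware counters and
 * the software baseline are dropped together. From the application this
 * is reached through:
 *
 *	rte_eth_stats_reset(port_id);
 */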
1540 
1541 /* This function calculates the number of xstats based on the current config */
1542 static unsigned
1543 ngbe_xstats_calc_num(struct rte_eth_dev *dev)
1544 {
1545 	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1546 	return NGBE_NB_HW_STATS +
1547 	       NGBE_NB_QP_STATS * nb_queues;
1548 }
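/*
 * Example: with 4 Rx queues and 2 Tx queues configured, nb_queues is
 * max(4, 2) = 4, so the port exposes
 * NGBE_NB_HW_STATS + 4 * NGBE_NB_QP_STATS (i.e. NGBE_NB_HW_STATS + 20)
 * extended statistics.
 */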
1549 
1550 static inline int
1551 ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
1552 {
1553 	int nb, st;
1554 
1555 	/* Extended stats from ngbe_hw_stats */
1556 	if (id < NGBE_NB_HW_STATS) {
1557 		snprintf(name, size, "[hw]%s",
1558 			rte_ngbe_stats_strings[id].name);
1559 		return 0;
1560 	}
1561 	id -= NGBE_NB_HW_STATS;
1562 
1563 	/* Queue Stats */
1564 	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1565 		nb = id / NGBE_NB_QP_STATS;
1566 		st = id % NGBE_NB_QP_STATS;
1567 		snprintf(name, size, "[q%u]%s", nb,
1568 			rte_ngbe_qp_strings[st].name);
1569 		return 0;
1570 	}
1571 	id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;
1572 
1573 	return -(int)(id + 1);
1574 }
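/*
 * Example of the id-to-name mapping above: with NGBE_NB_QP_STATS == 5,
 * id = NGBE_NB_HW_STATS + 7 lands in the queue range with nb = 7 / 5 = 1
 * and st = 7 % 5 = 2, yielding the name "[q1]rx_qp_bytes".
 */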
1575 
1576 static inline int
1577 ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1578 {
1579 	int nb, st;
1580 
1581 	/* Extended stats from ngbe_hw_stats */
1582 	if (id < NGBE_NB_HW_STATS) {
1583 		*offset = rte_ngbe_stats_strings[id].offset;
1584 		return 0;
1585 	}
1586 	id -= NGBE_NB_HW_STATS;
1587 
1588 	/* Queue Stats */
1589 	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1590 		nb = id / NGBE_NB_QP_STATS;
1591 		st = id % NGBE_NB_QP_STATS;
1592 		*offset = rte_ngbe_qp_strings[st].offset +
1593 			nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
1594 		return 0;
1595 	}
1596 
1597 	return -1;
1598 }
1599 
1600 static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1601 	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1602 {
1603 	unsigned int i, count;
1604 
1605 	count = ngbe_xstats_calc_num(dev);
1606 	if (xstats_names == NULL)
1607 		return count;
1608 
1609 	/* Note: limit >= cnt_stats checked upstream
1610 	 * in rte_eth_xstats_get_names()
1611 	 */
1612 	limit = min(limit, count);
1613 
1614 	/* Extended stats from ngbe_hw_stats */
1615 	for (i = 0; i < limit; i++) {
1616 		if (ngbe_get_name_by_id(i, xstats_names[i].name,
1617 			sizeof(xstats_names[i].name))) {
1618 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1619 			break;
1620 		}
1621 	}
1622 
1623 	return i;
1624 }
1625 
1626 static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1627 	const uint64_t *ids,
1628 	struct rte_eth_xstat_name *xstats_names,
1629 	unsigned int limit)
1630 {
1631 	unsigned int i;
1632 
1633 	if (ids == NULL)
1634 		return ngbe_dev_xstats_get_names(dev, xstats_names, limit);
1635 
1636 	for (i = 0; i < limit; i++) {
1637 		if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
1638 				sizeof(xstats_names[i].name))) {
1639 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1640 			return -1;
1641 		}
1642 	}
1643 
1644 	return i;
1645 }
1646 
1647 static int
1648 ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1649 					 unsigned int limit)
1650 {
1651 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1652 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1653 	unsigned int i, count;
1654 
1655 	ngbe_read_stats_registers(hw, hw_stats);
1656 
1657 	/* If this is a reset, xstats is NULL, and we have cleared the
1658 	 * registers by reading them.
1659 	 */
1660 	count = ngbe_xstats_calc_num(dev);
1661 	if (xstats == NULL)
1662 		return count;
1663 
1664 	limit = min(limit, ngbe_xstats_calc_num(dev));
1665 
1666 	/* Extended stats from ngbe_hw_stats */
1667 	for (i = 0; i < limit; i++) {
1668 		uint32_t offset = 0;
1669 
1670 		if (ngbe_get_offset_by_id(i, &offset)) {
1671 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1672 			break;
1673 		}
1674 		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1675 		xstats[i].id = i;
1676 	}
1677 
1678 	return i;
1679 }
1680 
1681 static int
1682 ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1683 					 unsigned int limit)
1684 {
1685 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1686 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1687 	unsigned int i, count;
1688 
1689 	ngbe_read_stats_registers(hw, hw_stats);
1690 
1691 	/* If this is a reset, xstats is NULL, and we have cleared the
1692 	 * registers by reading them.
1693 	 */
1694 	count = ngbe_xstats_calc_num(dev);
1695 	if (values == NULL)
1696 		return count;
1697 
1698 	limit = min(limit, ngbe_xstats_calc_num(dev));
1699 
1700 	/* Extended stats from ngbe_hw_stats */
1701 	for (i = 0; i < limit; i++) {
1702 		uint32_t offset;
1703 
1704 		if (ngbe_get_offset_by_id(i, &offset)) {
1705 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1706 			break;
1707 		}
1708 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1709 	}
1710 
1711 	return i;
1712 }
1713 
1714 static int
1715 ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1716 		uint64_t *values, unsigned int limit)
1717 {
1718 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1719 	unsigned int i;
1720 
1721 	if (ids == NULL)
1722 		return ngbe_dev_xstats_get_(dev, values, limit);
1723 
1724 	for (i = 0; i < limit; i++) {
1725 		uint32_t offset;
1726 
1727 		if (ngbe_get_offset_by_id(ids[i], &offset)) {
1728 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
1729 			break;
1730 		}
1731 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1732 	}
1733 
1734 	return i;
1735 }
1736 
1737 static int
1738 ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
1739 {
1740 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1741 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1742 
1743 	/* HW registers are cleared on read */
1744 	hw->offset_loaded = 0;
1745 	ngbe_read_stats_registers(hw, hw_stats);
1746 	hw->offset_loaded = 1;
1747 
1748 	/* Reset software totals */
1749 	memset(hw_stats, 0, sizeof(*hw_stats));
1750 
1751 	return 0;
1752 }
1753 
1754 static int
1755 ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1756 {
1757 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1758 	int ret;
1759 
1760 	ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
1761 
1762 	if (ret < 0)
1763 		return -EINVAL;
1764 
1765 	ret += 1; /* add the size of '\0' */
1766 	if (fw_size < (size_t)ret)
1767 		return ret;
1768 
1769 	return 0;
1770 }
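/*
 * Per the fw_version_get ethdev contract, returning 0 means fw_version
 * was filled in, while a positive return value is the buffer size needed
 * (including the terminating '\0'). A minimal caller sketch:
 *
 *	char fw[32];
 *
 *	if (rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw)) == 0)
 *		printf("firmware: %s\n", fw);
 */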
1771 
1772 static int
1773 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1774 {
1775 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1776 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1777 
1778 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1779 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1780 	dev_info->min_rx_bufsize = 1024;
1781 	dev_info->max_rx_pktlen = 15872;
1782 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1783 	dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
1784 	dev_info->max_vfs = pci_dev->max_vfs;
1785 	dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1786 	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1787 				     dev_info->rx_queue_offload_capa);
1788 	dev_info->tx_queue_offload_capa = 0;
1789 	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1790 
1791 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1792 		.rx_thresh = {
1793 			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
1794 			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
1795 			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
1796 		},
1797 		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1798 		.rx_drop_en = 0,
1799 		.offloads = 0,
1800 	};
1801 
1802 	dev_info->default_txconf = (struct rte_eth_txconf) {
1803 		.tx_thresh = {
1804 			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
1805 			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
1806 			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
1807 		},
1808 		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1809 		.offloads = 0,
1810 	};
1811 
1812 	dev_info->rx_desc_lim = rx_desc_lim;
1813 	dev_info->tx_desc_lim = tx_desc_lim;
1814 
1815 	dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
1816 	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
1817 	dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;
1818 
1819 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1820 				RTE_ETH_LINK_SPEED_10M;
1821 
1822 	/* Driver-preferred Rx/Tx parameters */
1823 	dev_info->default_rxportconf.burst_size = 32;
1824 	dev_info->default_txportconf.burst_size = 32;
1825 	dev_info->default_rxportconf.nb_queues = 1;
1826 	dev_info->default_txportconf.nb_queues = 1;
1827 	dev_info->default_rxportconf.ring_size = 256;
1828 	dev_info->default_txportconf.ring_size = 256;
1829 
1830 	return 0;
1831 }
1832 
1833 const uint32_t *
1834 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1835 {
1836 	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1837 	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1838 	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1839 	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1840 		return ngbe_get_supported_ptypes();
1841 
1842 	return NULL;
1843 }
1844 
1845 void
1846 ngbe_dev_setup_link_alarm_handler(void *param)
1847 {
1848 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1849 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1850 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1851 	u32 speed;
1852 	bool autoneg = false;
1853 
1854 	speed = hw->phy.autoneg_advertised;
1855 	if (!speed)
1856 		hw->mac.get_link_capabilities(hw, &speed, &autoneg);
1857 
1858 	hw->mac.setup_link(hw, speed, true);
1859 
1860 	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1861 }
1862 
1863 /* return 0 means link status changed, -1 means not changed */
1864 int
1865 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1866 			    int wait_to_complete)
1867 {
1868 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1869 	struct rte_eth_link link;
1870 	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1871 	u32 lan_speed = 0;
1872 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1873 	bool link_up;
1874 	int err;
1875 	int wait = 1;
1876 
1877 	memset(&link, 0, sizeof(link));
1878 	link.link_status = RTE_ETH_LINK_DOWN;
1879 	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1880 	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1881 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1882 			~RTE_ETH_LINK_SPEED_AUTONEG);
1883 
1884 	hw->mac.get_link_status = true;
1885 
1886 	if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
1887 		return rte_eth_linkstatus_set(dev, &link);
1888 
	/* don't wait for completion if the caller asked not to,
	 * or if the LSC interrupt is enabled
	 */
1890 	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1891 		wait = 0;
1892 
1893 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1894 	if (err != 0) {
1895 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1896 		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1897 		return rte_eth_linkstatus_set(dev, &link);
1898 	}
1899 
1900 	if (!link_up)
1901 		return rte_eth_linkstatus_set(dev, &link);
1902 
1903 	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1904 	link.link_status = RTE_ETH_LINK_UP;
1905 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1906 
1907 	switch (link_speed) {
1908 	default:
1909 	case NGBE_LINK_SPEED_UNKNOWN:
1910 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1911 		break;
1912 
1913 	case NGBE_LINK_SPEED_10M_FULL:
1914 		link.link_speed = RTE_ETH_SPEED_NUM_10M;
1915 		lan_speed = 0;
1916 		break;
1917 
1918 	case NGBE_LINK_SPEED_100M_FULL:
1919 		link.link_speed = RTE_ETH_SPEED_NUM_100M;
1920 		lan_speed = 1;
1921 		break;
1922 
1923 	case NGBE_LINK_SPEED_1GB_FULL:
1924 		link.link_speed = RTE_ETH_SPEED_NUM_1G;
1925 		lan_speed = 2;
1926 		break;
1927 	}
1928 
1929 	if (hw->is_pf) {
1930 		wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1931 		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1932 				NGBE_LINK_SPEED_100M_FULL |
1933 				NGBE_LINK_SPEED_10M_FULL)) {
1934 			wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1935 				NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1936 		}
1937 	}
1938 
1939 	return rte_eth_linkstatus_set(dev, &link);
1940 }
1941 
1942 static int
1943 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1944 {
1945 	return ngbe_dev_link_update_share(dev, wait_to_complete);
1946 }
1947 
1948 static int
1949 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1950 {
1951 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1952 	uint32_t fctrl;
1953 
1954 	fctrl = rd32(hw, NGBE_PSRCTL);
1955 	fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
1956 	wr32(hw, NGBE_PSRCTL, fctrl);
1957 
1958 	return 0;
1959 }
1960 
1961 static int
1962 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
1963 {
1964 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1965 	uint32_t fctrl;
1966 
1967 	fctrl = rd32(hw, NGBE_PSRCTL);
1968 	fctrl &= (~NGBE_PSRCTL_UCP);
1969 	if (dev->data->all_multicast == 1)
1970 		fctrl |= NGBE_PSRCTL_MCP;
1971 	else
1972 		fctrl &= (~NGBE_PSRCTL_MCP);
1973 	wr32(hw, NGBE_PSRCTL, fctrl);
1974 
1975 	return 0;
1976 }
1977 
1978 static int
1979 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
1980 {
1981 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1982 	uint32_t fctrl;
1983 
1984 	fctrl = rd32(hw, NGBE_PSRCTL);
1985 	fctrl |= NGBE_PSRCTL_MCP;
1986 	wr32(hw, NGBE_PSRCTL, fctrl);
1987 
1988 	return 0;
1989 }
1990 
1991 static int
1992 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
1993 {
1994 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1995 	uint32_t fctrl;
1996 
1997 	if (dev->data->promiscuous == 1)
1998 		return 0; /* must remain in all_multicast mode */
1999 
2000 	fctrl = rd32(hw, NGBE_PSRCTL);
2001 	fctrl &= (~NGBE_PSRCTL_MCP);
2002 	wr32(hw, NGBE_PSRCTL, fctrl);
2003 
2004 	return 0;
2005 }
2006 
2007 /**
 * It enables or disables the link status change (LSC) interrupt causes.
 * It will be called only once during NIC initialization.
2010  *
2011  * @param dev
2012  *  Pointer to struct rte_eth_dev.
2013  * @param on
2014  *  Enable or Disable.
2015  *
2016  * @return
2017  *  - On success, zero.
2018  *  - On failure, a negative value.
2019  */
2020 static int
2021 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2022 {
2023 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2024 
2025 	ngbe_dev_link_status_print(dev);
2026 	if (on != 0) {
2027 		intr->mask_misc |= NGBE_ICRMISC_PHY;
2028 		intr->mask_misc |= NGBE_ICRMISC_GPIO;
2029 	} else {
2030 		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2031 		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
2032 	}
2033 
2034 	return 0;
2035 }
2036 
2037 /**
 * It sets up the misc interrupt mask.
 * It will be called only once during NIC initialization.
2040  *
2041  * @param dev
2042  *  Pointer to struct rte_eth_dev.
2043  *
2044  * @return
2045  *  - On success, zero.
2046  *  - On failure, a negative value.
2047  */
2048 static int
2049 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
2050 {
2051 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2052 	u64 mask;
2053 
2054 	mask = NGBE_ICR_MASK;
2055 	mask &= (1ULL << NGBE_MISC_VEC_ID);
2056 	intr->mask |= mask;
2057 	intr->mask_misc |= NGBE_ICRMISC_GPIO;
2058 
2059 	return 0;
2060 }
2061 
2062 /**
 * It sets up the Rx queue interrupt mask.
 * It will be called only once during NIC initialization.
2065  *
2066  * @param dev
2067  *  Pointer to struct rte_eth_dev.
2068  *
2069  * @return
2070  *  - On success, zero.
2071  *  - On failure, a negative value.
2072  */
2073 static int
2074 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2075 {
2076 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2077 	u64 mask;
2078 
2079 	mask = NGBE_ICR_MASK;
2080 	mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
2081 	intr->mask |= mask;
2082 
2083 	return 0;
2084 }
2085 
2086 /**
 * It sets up the MACsec interrupt mask.
 * It will be called only once during NIC initialization.
2089  *
2090  * @param dev
2091  *  Pointer to struct rte_eth_dev.
2092  *
2093  * @return
2094  *  - On success, zero.
2095  *  - On failure, a negative value.
2096  */
2097 static int
2098 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2099 {
2100 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2101 
2102 	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
2103 
2104 	return 0;
2105 }
2106 
/**
 * It reads the ICR and sets the flags for link_update.
2109  *
2110  * @param dev
2111  *  Pointer to struct rte_eth_dev.
2112  *
2113  * @return
2114  *  - On success, zero.
2115  *  - On failure, a negative value.
2116  */
2117 static int
2118 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2119 {
2120 	uint32_t eicr;
2121 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2122 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2123 
2124 	/* read-on-clear nic registers here */
2125 	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2126 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2127 
2128 	intr->flags = 0;
2129 
2130 	/* set flag for async link update */
2131 	if (eicr & NGBE_ICRMISC_PHY)
2132 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2133 
2134 	if (eicr & NGBE_ICRMISC_VFMBX)
2135 		intr->flags |= NGBE_FLAG_MAILBOX;
2136 
2137 	if (eicr & NGBE_ICRMISC_LNKSEC)
2138 		intr->flags |= NGBE_FLAG_MACSEC;
2139 
2140 	if (eicr & NGBE_ICRMISC_GPIO)
2141 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2142 
2143 	((u32 *)hw->isb_mem)[NGBE_ISB_MISC] = 0;
2144 
2145 	return 0;
2146 }
2147 
/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 */
2158 static void
2159 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
2160 {
2161 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2162 	struct rte_eth_link link;
2163 
2164 	rte_eth_linkstatus_get(dev, &link);
2165 
2166 	if (link.link_status == RTE_ETH_LINK_UP) {
2167 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2168 					(int)(dev->data->port_id),
2169 					(unsigned int)link.link_speed,
2170 			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2171 					"full-duplex" : "half-duplex");
2172 	} else {
		PMD_INIT_LOG(INFO, "Port %d: Link Down",
2174 				(int)(dev->data->port_id));
2175 	}
2176 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2177 				pci_dev->addr.domain,
2178 				pci_dev->addr.bus,
2179 				pci_dev->addr.devid,
2180 				pci_dev->addr.function);
2181 }
2182 
/**
 * It executes link_update after an interrupt has occurred.
2185  *
2186  * @param dev
2187  *  Pointer to struct rte_eth_dev.
2188  *
2189  * @return
2190  *  - On success, zero.
2191  *  - On failure, a negative value.
2192  */
2193 static int
2194 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2195 {
2196 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2197 
2198 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2199 
2200 	if (intr->flags & NGBE_FLAG_MAILBOX) {
2201 		ngbe_pf_mbx_process(dev);
2202 		intr->flags &= ~NGBE_FLAG_MAILBOX;
2203 	}
2204 
2205 	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2206 		struct rte_eth_link link;
2207 
		/* get the link status before link update, for later comparison */
2209 		rte_eth_linkstatus_get(dev, &link);
2210 
2211 		ngbe_dev_link_update(dev, 0);
2212 		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2213 		ngbe_dev_link_status_print(dev);
2214 		if (dev->data->dev_link.link_speed != link.link_speed)
2215 			rte_eth_dev_callback_process(dev,
2216 				RTE_ETH_EVENT_INTR_LSC, NULL);
2217 	}
2218 
2219 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
2220 	ngbe_enable_intr(dev);
2221 
2222 	return 0;
2223 }
2224 
2225 /**
 * Interrupt handler triggered by the NIC for handling a
 * specific interrupt.
2228  *
2229  * @param param
2230  *  The address of parameter (struct rte_eth_dev *) registered before.
2231  */
2232 static void
2233 ngbe_dev_interrupt_handler(void *param)
2234 {
2235 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2236 
2237 	ngbe_dev_interrupt_get_status(dev);
2238 	ngbe_dev_interrupt_action(dev);
2239 }
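
/*
 * Usage sketch (application side, not part of the driver): the LSC
 * event delivered by ngbe_dev_interrupt_action() is what applications
 * hook into via the ethdev callback API, e.g.:
 *
 *	static int
 *	lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
 *	       void *cb_arg, void *ret_param)
 *	{
 *		...
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_cb, NULL);
 */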
2240 
2241 static int
2242 ngbe_dev_led_on(struct rte_eth_dev *dev)
2243 {
2244 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2245 	return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP;
2246 }
2247 
2248 static int
2249 ngbe_dev_led_off(struct rte_eth_dev *dev)
2250 {
2251 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2252 	return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP;
2253 }
2254 
2255 static int
2256 ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2257 {
2258 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2259 	uint32_t mflcn_reg;
2260 	uint32_t fccfg_reg;
2261 	int rx_pause;
2262 	int tx_pause;
2263 
2264 	fc_conf->pause_time = hw->fc.pause_time;
2265 	fc_conf->high_water = hw->fc.high_water;
2266 	fc_conf->low_water = hw->fc.low_water;
2267 	fc_conf->send_xon = hw->fc.send_xon;
2268 	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2269 
2270 	/*
2271 	 * Return rx_pause status according to actual setting of
2272 	 * RXFCCFG register.
2273 	 */
2274 	mflcn_reg = rd32(hw, NGBE_RXFCCFG);
2275 	if (mflcn_reg & NGBE_RXFCCFG_FC)
2276 		rx_pause = 1;
2277 	else
2278 		rx_pause = 0;
2279 
2280 	/*
2281 	 * Return tx_pause status according to actual setting of
2282 	 * TXFCCFG register.
2283 	 */
2284 	fccfg_reg = rd32(hw, NGBE_TXFCCFG);
2285 	if (fccfg_reg & NGBE_TXFCCFG_FC)
2286 		tx_pause = 1;
2287 	else
2288 		tx_pause = 0;
2289 
2290 	if (rx_pause && tx_pause)
2291 		fc_conf->mode = RTE_ETH_FC_FULL;
2292 	else if (rx_pause)
2293 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2294 	else if (tx_pause)
2295 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2296 	else
2297 		fc_conf->mode = RTE_ETH_FC_NONE;
2298 
2299 	return 0;
2300 }
2301 
2302 static int
2303 ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2304 {
2305 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2306 	int err;
2307 	uint32_t rx_buf_size;
2308 	uint32_t max_high_water;
2309 	enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = {
2310 		ngbe_fc_none,
2311 		ngbe_fc_rx_pause,
2312 		ngbe_fc_tx_pause,
2313 		ngbe_fc_full
2314 	};
2315 
2316 	PMD_INIT_FUNC_TRACE();
2317 
2318 	rx_buf_size = rd32(hw, NGBE_PBRXSIZE);
2319 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2320 
	/*
	 * Reserve at least one Ethernet frame for the watermark;
	 * high_water/low_water are in kilobytes for ngbe.
	 */
2325 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
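	/*
	 * Worked example with hypothetical values: for a 32 KB Rx packet
	 * buffer (rx_buf_size = 0x8000) and RTE_ETHER_MAX_LEN = 1518,
	 * max_high_water = (32768 - 1518) >> 10 = 30, i.e. the high
	 * watermark may be at most 30 KB.
	 */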
2326 	if (fc_conf->high_water > max_high_water ||
2327 	    fc_conf->high_water < fc_conf->low_water) {
2328 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
2330 		return -EINVAL;
2331 	}
2332 
2333 	hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode];
2334 	hw->fc.pause_time     = fc_conf->pause_time;
2335 	hw->fc.high_water     = fc_conf->high_water;
2336 	hw->fc.low_water      = fc_conf->low_water;
2337 	hw->fc.send_xon       = fc_conf->send_xon;
2338 	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
2339 
2340 	err = hw->mac.fc_enable(hw);
2341 
2342 	/* Not negotiated is not an error case */
2343 	if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) {
2344 		wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK,
2345 		      (fc_conf->mac_ctrl_frame_fwd
2346 		       ? NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP));
2347 		ngbe_flush(hw);
2348 
2349 		return 0;
2350 	}
2351 
2352 	PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err);
2353 	return -EIO;
2354 }
2355 
2356 int
2357 ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2358 			  struct rte_eth_rss_reta_entry64 *reta_conf,
2359 			  uint16_t reta_size)
2360 {
2361 	uint8_t i, j, mask;
2362 	uint32_t reta;
2363 	uint16_t idx, shift;
2364 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2365 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2366 
2367 	PMD_INIT_FUNC_TRACE();
2368 
2369 	if (!hw->is_pf) {
2370 		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
2371 			"NIC.");
2372 		return -ENOTSUP;
2373 	}
2374 
2375 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2379 		return -EINVAL;
2380 	}
2381 
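	/*
	 * Each 32-bit NGBE_REG_RSSTBL word packs four 8-bit redirection
	 * entries, so the table is walked four entries per iteration and
	 * the 4-bit mask slice selects which of those entries to rewrite.
	 */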
2382 	for (i = 0; i < reta_size; i += 4) {
2383 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2384 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2385 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2386 		if (!mask)
2387 			continue;
2388 
2389 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2390 		for (j = 0; j < 4; j++) {
2391 			if (RS8(mask, j, 0x1)) {
2392 				reta  &= ~(MS32(8 * j, 0xFF));
2393 				reta |= LS32(reta_conf[idx].reta[shift + j],
2394 						8 * j, 0xFF);
2395 			}
2396 		}
2397 		wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2398 	}
2399 	adapter->rss_reta_updated = 1;
2400 
2401 	return 0;
2402 }
2403 
2404 int
2405 ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2406 			 struct rte_eth_rss_reta_entry64 *reta_conf,
2407 			 uint16_t reta_size)
2408 {
2409 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2410 	uint8_t i, j, mask;
2411 	uint32_t reta;
2412 	uint16_t idx, shift;
2413 
2414 	PMD_INIT_FUNC_TRACE();
2415 
2416 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2420 		return -EINVAL;
2421 	}
2422 
2423 	for (i = 0; i < reta_size; i += 4) {
2424 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2425 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2426 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2427 		if (!mask)
2428 			continue;
2429 
2430 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2431 		for (j = 0; j < 4; j++) {
2432 			if (RS8(mask, j, 0x1))
2433 				reta_conf[idx].reta[shift + j] =
2434 					(uint16_t)RS32(reta, 8 * j, 0xFF);
2435 		}
2436 	}
2437 
2438 	return 0;
2439 }
2440 
2441 static int
2442 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2443 				uint32_t index, uint32_t pool)
2444 {
2445 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2446 	uint32_t enable_addr = 1;
2447 
2448 	return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2449 			     pool, enable_addr);
2450 }
2451 
2452 static void
2453 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2454 {
2455 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2456 
2457 	ngbe_clear_rar(hw, index);
2458 }
2459 
2460 static int
2461 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2462 {
2463 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2464 
2465 	ngbe_remove_rar(dev, 0);
2466 	ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2467 
2468 	return 0;
2469 }
2470 
2471 static int
2472 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2473 {
2474 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	/* frame = MTU + L2 header + CRC + 4 bytes for a VLAN tag */
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
2476 	struct rte_eth_dev_data *dev_data = dev->data;
2477 
	/* If the device is started, refuse an MTU that requires scattered
	 * Rx support when this feature has not been enabled before.
	 */
2481 	if (dev_data->dev_started && !dev_data->scattered_rx &&
2482 	    (frame_size + 2 * RTE_VLAN_HLEN >
2483 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2484 		PMD_INIT_LOG(ERR, "Stop port first.");
2485 		return -EINVAL;
2486 	}
2487 
2488 	if (hw->mode)
2489 		wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2490 			NGBE_FRAME_SIZE_MAX);
2491 	else
2492 		wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2493 			NGBE_FRMSZ_MAX(frame_size));
2494 
2495 	return 0;
2496 }
2497 
2498 static uint32_t
2499 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2500 {
2501 	uint32_t vector = 0;
2502 
2503 	switch (hw->mac.mc_filter_type) {
2504 	case 0:   /* use bits [47:36] of the address */
2505 		vector = ((uc_addr->addr_bytes[4] >> 4) |
2506 			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
2507 		break;
2508 	case 1:   /* use bits [46:35] of the address */
2509 		vector = ((uc_addr->addr_bytes[4] >> 3) |
2510 			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
2511 		break;
2512 	case 2:   /* use bits [45:34] of the address */
2513 		vector = ((uc_addr->addr_bytes[4] >> 2) |
2514 			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
2515 		break;
2516 	case 3:   /* use bits [43:32] of the address */
2517 		vector = ((uc_addr->addr_bytes[4]) |
2518 			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
2519 		break;
2520 	default:  /* Invalid mc_filter_type */
2521 		break;
2522 	}
2523 
	/* vector can only be 12 bits wide or the table boundary will be exceeded */
2525 	vector &= 0xFFF;
2526 	return vector;
2527 }
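
/*
 * Worked example for mc_filter_type == 0, using an arbitrary address:
 * with addr_bytes[4] = 0x9e and addr_bytes[5] = 0x42,
 * vector = (0x9e >> 4) | (0x42 << 4) = 0x429, selecting one of the
 * 4096 hash-table bits managed below.
 */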
2528 
2529 static int
2530 ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2531 			struct rte_ether_addr *mac_addr, uint8_t on)
2532 {
2533 	uint32_t vector;
2534 	uint32_t uta_idx;
2535 	uint32_t reg_val;
2536 	uint32_t uta_mask;
2537 	uint32_t psrctl;
2538 
2539 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2540 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2541 
2542 	vector = ngbe_uta_vector(hw, mac_addr);
2543 	uta_idx = (vector >> 5) & 0x7F;
2544 	uta_mask = 0x1UL << (vector & 0x1F);
2545 
2546 	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2547 		return 0;
2548 
2549 	reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2550 	if (on) {
2551 		uta_info->uta_in_use++;
2552 		reg_val |= uta_mask;
2553 		uta_info->uta_shadow[uta_idx] |= uta_mask;
2554 	} else {
2555 		uta_info->uta_in_use--;
2556 		reg_val &= ~uta_mask;
2557 		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2558 	}
2559 
2560 	wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2561 
2562 	psrctl = rd32(hw, NGBE_PSRCTL);
2563 	if (uta_info->uta_in_use > 0)
2564 		psrctl |= NGBE_PSRCTL_UCHFENA;
2565 	else
2566 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2567 
2568 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2569 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2570 	wr32(hw, NGBE_PSRCTL, psrctl);
2571 
2572 	return 0;
2573 }
2574 
2575 static int
2576 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2577 {
2578 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2579 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2580 	uint32_t psrctl;
2581 	int i;
2582 
2583 	if (on) {
2584 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2585 			uta_info->uta_shadow[i] = ~0;
2586 			wr32(hw, NGBE_UCADDRTBL(i), ~0);
2587 		}
2588 	} else {
2589 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2590 			uta_info->uta_shadow[i] = 0;
2591 			wr32(hw, NGBE_UCADDRTBL(i), 0);
2592 		}
2593 	}
2594 
2595 	psrctl = rd32(hw, NGBE_PSRCTL);
2596 	if (on)
2597 		psrctl |= NGBE_PSRCTL_UCHFENA;
2598 	else
2599 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2600 
2601 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2602 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2603 	wr32(hw, NGBE_PSRCTL, psrctl);
2604 
2605 	return 0;
2606 }
2607 
2608 /**
2609  * Set the IVAR registers, mapping interrupt causes to vectors
2610  * @param hw
2611  *  pointer to ngbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
2618  */
2619 void
2620 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2621 		   uint8_t queue, uint8_t msix_vector)
2622 {
2623 	uint32_t tmp, idx;
2624 
2625 	if (direction == -1) {
2626 		/* other causes */
2627 		msix_vector |= NGBE_IVARMISC_VLD;
2628 		idx = 0;
2629 		tmp = rd32(hw, NGBE_IVARMISC);
2630 		tmp &= ~(0xFF << idx);
2631 		tmp |= (msix_vector << idx);
2632 		wr32(hw, NGBE_IVARMISC, tmp);
2633 	} else {
2634 		/* rx or tx causes */
2635 		/* Workaround for ICR lost */
2636 		idx = ((16 * (queue & 1)) + (8 * direction));
2637 		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2638 		tmp &= ~(0xFF << idx);
2639 		tmp |= (msix_vector << idx);
2640 		wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2641 	}
2642 }
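
/*
 * IVAR layout implied by the index math above: each NGBE_IVAR register
 * covers one queue pair, with 8-bit vector fields at bit offsets
 * 0 (even-queue Rx), 8 (even-queue Tx), 16 (odd-queue Rx) and
 * 24 (odd-queue Tx). Mapping Rx queue 3 to vector 2, for example,
 * writes 0x02 into bits [23:16] of NGBE_IVAR(1).
 */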
2643 
2644 /**
2645  * Sets up the hardware to properly generate MSI-X interrupts
 * @param dev
 *  Pointer to struct rte_eth_dev.
2648  */
2649 static void
2650 ngbe_configure_msix(struct rte_eth_dev *dev)
2651 {
2652 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2653 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2654 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2655 	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2656 	uint32_t vec = NGBE_MISC_VEC_ID;
2657 	uint32_t gpie;
2658 
	/*
	 * Don't configure the MSI-X register if no mapping is done
	 * between intr vector and event fd. But if MSI-X has already
	 * been enabled, auto clearing, auto masking and throttling
	 * still need to be configured.
	 */
2665 	gpie = rd32(hw, NGBE_GPIE);
2666 	if (!rte_intr_dp_is_en(intr_handle) &&
2667 	    !(gpie & NGBE_GPIE_MSIX))
2668 		return;
2669 
2670 	if (rte_intr_allow_others(intr_handle)) {
2671 		base = NGBE_RX_VEC_START;
2672 		vec = base;
2673 	}
2674 
2675 	/* setup GPIE for MSI-X mode */
2676 	gpie = rd32(hw, NGBE_GPIE);
2677 	gpie |= NGBE_GPIE_MSIX;
2678 	wr32(hw, NGBE_GPIE, gpie);
2679 
2680 	/* Populate the IVAR table and set the ITR values to the
2681 	 * corresponding register.
2682 	 */
2683 	if (rte_intr_dp_is_en(intr_handle)) {
2684 		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2685 			queue_id++) {
2686 			/* by default, 1:1 mapping */
2687 			ngbe_set_ivar_map(hw, 0, queue_id, vec);
2688 			rte_intr_vec_list_index_set(intr_handle,
2689 							   queue_id, vec);
2690 			if (vec < base + rte_intr_nb_efd_get(intr_handle)
2691 			    - 1)
2692 				vec++;
2693 		}
2694 
2695 		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2696 	}
2697 	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2698 			NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2699 			| NGBE_ITR_WRDSA);
2700 }
2701 
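/*
 * Iterator handed to update_mc_addr_list(): it returns the current MAC
 * address and advances *mc_addr_ptr by one struct rte_ether_addr.
 * The VMDq pool output is always 0 here.
 */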
2702 static u8 *
2703 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2704 			u8 **mc_addr_ptr, u32 *vmdq)
2705 {
2706 	u8 *mc_addr;
2707 
2708 	*vmdq = 0;
2709 	mc_addr = *mc_addr_ptr;
2710 	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2711 	return mc_addr;
2712 }
2713 
2714 int
2715 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2716 			  struct rte_ether_addr *mc_addr_set,
2717 			  uint32_t nb_mc_addr)
2718 {
2719 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2720 	u8 *mc_addr_list;
2721 
2722 	mc_addr_list = (u8 *)mc_addr_set;
2723 	return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2724 					 ngbe_dev_addr_list_itr, TRUE);
2725 }
2726 
2727 static uint64_t
2728 ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
2729 {
2730 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2731 	uint64_t systime_cycles;
2732 
2733 	systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL);
2734 	systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32;
2735 
2736 	return systime_cycles;
2737 }
2738 
2739 static uint64_t
2740 ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2741 {
2742 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2743 	uint64_t rx_tstamp_cycles;
2744 
2745 	/* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
2746 	rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL);
2747 	rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32;
2748 
2749 	return rx_tstamp_cycles;
2750 }
2751 
2752 static uint64_t
2753 ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2754 {
2755 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2756 	uint64_t tx_tstamp_cycles;
2757 
2758 	/* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
2759 	tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL);
2760 	tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32;
2761 
2762 	return tx_tstamp_cycles;
2763 }
2764 
2765 static void
2766 ngbe_start_timecounters(struct rte_eth_dev *dev)
2767 {
2768 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2769 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2770 	uint32_t incval = 0;
2771 	uint32_t shift = 0;
2772 
2773 	incval = NGBE_INCVAL_1GB;
2774 	shift = NGBE_INCVAL_SHIFT_1GB;
2775 
2776 	wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval));
2777 
2778 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
2779 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2780 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2781 
2782 	adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2783 	adapter->systime_tc.cc_shift = shift;
2784 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
2785 
2786 	adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2787 	adapter->rx_tstamp_tc.cc_shift = shift;
2788 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2789 
2790 	adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2791 	adapter->tx_tstamp_tc.cc_shift = shift;
2792 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2793 }
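
/*
 * The cc_mask/cc_shift/nsec_mask values set above are consumed by
 * rte_timecounter_update() (see the timesync read helpers below), which
 * turns raw cycle deltas from the TSTIME/TSxxSTMP registers into
 * nanoseconds.
 */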
2794 
2795 static int
2796 ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
2797 {
2798 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2799 
2800 	adapter->systime_tc.nsec += delta;
2801 	adapter->rx_tstamp_tc.nsec += delta;
2802 	adapter->tx_tstamp_tc.nsec += delta;
2803 
2804 	return 0;
2805 }
2806 
2807 static int
2808 ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
2809 {
2810 	uint64_t ns;
2811 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2812 
2813 	ns = rte_timespec_to_ns(ts);
2814 	/* Set the timecounters to a new value. */
2815 	adapter->systime_tc.nsec = ns;
2816 	adapter->rx_tstamp_tc.nsec = ns;
2817 	adapter->tx_tstamp_tc.nsec = ns;
2818 
2819 	return 0;
2820 }
2821 
2822 static int
2823 ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
2824 {
2825 	uint64_t ns, systime_cycles;
2826 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2827 
2828 	systime_cycles = ngbe_read_systime_cyclecounter(dev);
2829 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
2830 	*ts = rte_ns_to_timespec(ns);
2831 
2832 	return 0;
2833 }
2834 
2835 static int
2836 ngbe_timesync_enable(struct rte_eth_dev *dev)
2837 {
2838 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2839 	uint32_t tsync_ctl;
2840 
2841 	/* Stop the timesync system time. */
2842 	wr32(hw, NGBE_TSTIMEINC, 0x0);
2843 	/* Reset the timesync system time value. */
2844 	wr32(hw, NGBE_TSTIMEL, 0x0);
2845 	wr32(hw, NGBE_TSTIMEH, 0x0);
2846 
2847 	ngbe_start_timecounters(dev);
2848 
2849 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2850 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588),
2851 		RTE_ETHER_TYPE_1588 | NGBE_ETFLT_ENA | NGBE_ETFLT_1588);
2852 
2853 	/* Enable timestamping of received PTP packets. */
2854 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2855 	tsync_ctl |= NGBE_TSRXCTL_ENA;
2856 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2857 
2858 	/* Enable timestamping of transmitted PTP packets. */
2859 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2860 	tsync_ctl |= NGBE_TSTXCTL_ENA;
2861 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2862 
2863 	ngbe_flush(hw);
2864 
2865 	return 0;
2866 }
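
/*
 * Usage sketch (application side, not part of the driver), assuming a
 * valid port_id:
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	... wait for a PTP packet to be received ...
 *	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *		printf("Rx tstamp: %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
 */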
2867 
2868 static int
2869 ngbe_timesync_disable(struct rte_eth_dev *dev)
2870 {
2871 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2872 	uint32_t tsync_ctl;
2873 
2874 	/* Disable timestamping of transmitted PTP packets. */
2875 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2876 	tsync_ctl &= ~NGBE_TSTXCTL_ENA;
2877 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2878 
2879 	/* Disable timestamping of received PTP packets. */
2880 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2881 	tsync_ctl &= ~NGBE_TSRXCTL_ENA;
2882 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2883 
2884 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2885 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);
2886 
2887 	/* Stop incrementing the System Time registers. */
2888 	wr32(hw, NGBE_TSTIMEINC, 0);
2889 
2890 	return 0;
2891 }
2892 
2893 static int
2894 ngbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
2895 				 struct timespec *timestamp,
2896 				 uint32_t flags __rte_unused)
2897 {
2898 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2899 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2900 	uint32_t tsync_rxctl;
2901 	uint64_t rx_tstamp_cycles;
2902 	uint64_t ns;
2903 
2904 	tsync_rxctl = rd32(hw, NGBE_TSRXCTL);
2905 	if ((tsync_rxctl & NGBE_TSRXCTL_VLD) == 0)
2906 		return -EINVAL;
2907 
2908 	rx_tstamp_cycles = ngbe_read_rx_tstamp_cyclecounter(dev);
2909 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
2910 	*timestamp = rte_ns_to_timespec(ns);
2911 
	return 0;
2913 }
2914 
2915 static int
2916 ngbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
2917 				 struct timespec *timestamp)
2918 {
2919 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2920 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2921 	uint32_t tsync_txctl;
2922 	uint64_t tx_tstamp_cycles;
2923 	uint64_t ns;
2924 
2925 	tsync_txctl = rd32(hw, NGBE_TSTXCTL);
2926 	if ((tsync_txctl & NGBE_TSTXCTL_VLD) == 0)
2927 		return -EINVAL;
2928 
2929 	tx_tstamp_cycles = ngbe_read_tx_tstamp_cyclecounter(dev);
2930 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
2931 	*timestamp = rte_ns_to_timespec(ns);
2932 
2933 	return 0;
2934 }
2935 
2936 static int
2937 ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
2938 {
2939 	int count = 0;
2940 	int g_ind = 0;
2941 	const struct reg_info *reg_group;
2942 	const struct reg_info **reg_set = ngbe_regs_others;
2943 
2944 	while ((reg_group = reg_set[g_ind++]))
2945 		count += ngbe_regs_group_count(reg_group);
2946 
2947 	return count;
2948 }
2949 
2950 static int
2951 ngbe_get_regs(struct rte_eth_dev *dev,
2952 	      struct rte_dev_reg_info *regs)
2953 {
2954 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2955 	uint32_t *data = regs->data;
2956 	int g_ind = 0;
2957 	int count = 0;
2958 	const struct reg_info *reg_group;
2959 	const struct reg_info **reg_set = ngbe_regs_others;
2960 
2961 	if (data == NULL) {
2962 		regs->length = ngbe_get_reg_length(dev);
2963 		regs->width = sizeof(uint32_t);
2964 		return 0;
2965 	}
2966 
2967 	/* Support only full register dump */
2968 	if (regs->length == 0 ||
2969 	    regs->length == (uint32_t)ngbe_get_reg_length(dev)) {
2970 		regs->version = hw->mac.type << 24 |
2971 				hw->revision_id << 16 |
2972 				hw->device_id;
2973 		while ((reg_group = reg_set[g_ind++]))
2974 			count += ngbe_read_regs_group(dev, &data[count],
2975 						      reg_group);
2976 		return 0;
2977 	}
2978 
2979 	return -ENOTSUP;
2980 }
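
/*
 * Usage sketch (application side, not part of the driver): since only a
 * full dump is supported, probe the size first. Assuming a valid
 * port_id:
 *
 *	struct rte_dev_reg_info info = { .data = NULL };
 *
 *	rte_eth_dev_get_reg_info(port_id, &info);
 *	info.data = calloc(info.length, info.width);
 *	rte_eth_dev_get_reg_info(port_id, &info);
 */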
2981 
2982 static int
2983 ngbe_get_eeprom_length(struct rte_eth_dev *dev)
2984 {
2985 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2986 
	/* Return unit is byte count (ROM words are 16 bits wide) */
2988 	return hw->rom.word_size * 2;
2989 }
2990 
2991 static int
2992 ngbe_get_eeprom(struct rte_eth_dev *dev,
2993 		struct rte_dev_eeprom_info *in_eeprom)
2994 {
2995 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2996 	struct ngbe_rom_info *eeprom = &hw->rom;
2997 	uint16_t *data = in_eeprom->data;
2998 	int first, length;
2999 
3000 	first = in_eeprom->offset >> 1;
3001 	length = in_eeprom->length >> 1;
3002 	if (first > hw->rom.word_size ||
3003 	    ((first + length) > hw->rom.word_size))
3004 		return -EINVAL;
3005 
3006 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3007 
3008 	return eeprom->readw_buffer(hw, first, length, data);
3009 }
3010 
3011 static int
3012 ngbe_set_eeprom(struct rte_eth_dev *dev,
3013 		struct rte_dev_eeprom_info *in_eeprom)
3014 {
3015 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3016 	struct ngbe_rom_info *eeprom = &hw->rom;
3017 	uint16_t *data = in_eeprom->data;
3018 	int first, length;
3019 
3020 	first = in_eeprom->offset >> 1;
3021 	length = in_eeprom->length >> 1;
3022 	if (first > hw->rom.word_size ||
3023 	    ((first + length) > hw->rom.word_size))
3024 		return -EINVAL;
3025 
3026 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3027 
	return eeprom->writew_buffer(hw, first, length, data);
3029 }
3030 
3031 static const struct eth_dev_ops ngbe_eth_dev_ops = {
3032 	.dev_configure              = ngbe_dev_configure,
3033 	.dev_infos_get              = ngbe_dev_info_get,
3034 	.dev_start                  = ngbe_dev_start,
3035 	.dev_stop                   = ngbe_dev_stop,
3036 	.dev_close                  = ngbe_dev_close,
3037 	.dev_reset                  = ngbe_dev_reset,
3038 	.promiscuous_enable         = ngbe_dev_promiscuous_enable,
3039 	.promiscuous_disable        = ngbe_dev_promiscuous_disable,
3040 	.allmulticast_enable        = ngbe_dev_allmulticast_enable,
3041 	.allmulticast_disable       = ngbe_dev_allmulticast_disable,
3042 	.link_update                = ngbe_dev_link_update,
3043 	.stats_get                  = ngbe_dev_stats_get,
3044 	.xstats_get                 = ngbe_dev_xstats_get,
3045 	.xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
3046 	.stats_reset                = ngbe_dev_stats_reset,
3047 	.xstats_reset               = ngbe_dev_xstats_reset,
3048 	.xstats_get_names           = ngbe_dev_xstats_get_names,
3049 	.xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
3050 	.fw_version_get             = ngbe_fw_version_get,
3051 	.dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
3052 	.mtu_set                    = ngbe_dev_mtu_set,
3053 	.vlan_filter_set            = ngbe_vlan_filter_set,
3054 	.vlan_tpid_set              = ngbe_vlan_tpid_set,
3055 	.vlan_offload_set           = ngbe_vlan_offload_set,
3056 	.vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
3057 	.rx_queue_start	            = ngbe_dev_rx_queue_start,
3058 	.rx_queue_stop              = ngbe_dev_rx_queue_stop,
3059 	.tx_queue_start	            = ngbe_dev_tx_queue_start,
3060 	.tx_queue_stop              = ngbe_dev_tx_queue_stop,
3061 	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
3062 	.rx_queue_release           = ngbe_dev_rx_queue_release,
3063 	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
3064 	.tx_queue_release           = ngbe_dev_tx_queue_release,
3065 	.dev_led_on                 = ngbe_dev_led_on,
3066 	.dev_led_off                = ngbe_dev_led_off,
3067 	.flow_ctrl_get              = ngbe_flow_ctrl_get,
3068 	.flow_ctrl_set              = ngbe_flow_ctrl_set,
3069 	.mac_addr_add               = ngbe_add_rar,
3070 	.mac_addr_remove            = ngbe_remove_rar,
3071 	.mac_addr_set               = ngbe_set_default_mac_addr,
3072 	.uc_hash_table_set          = ngbe_uc_hash_table_set,
3073 	.uc_all_hash_table_set      = ngbe_uc_all_hash_table_set,
3074 	.reta_update                = ngbe_dev_rss_reta_update,
3075 	.reta_query                 = ngbe_dev_rss_reta_query,
3076 	.rss_hash_update            = ngbe_dev_rss_hash_update,
3077 	.rss_hash_conf_get          = ngbe_dev_rss_hash_conf_get,
3078 	.set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
3079 	.rxq_info_get               = ngbe_rxq_info_get,
3080 	.txq_info_get               = ngbe_txq_info_get,
3081 	.rx_burst_mode_get          = ngbe_rx_burst_mode_get,
3082 	.tx_burst_mode_get          = ngbe_tx_burst_mode_get,
3083 	.timesync_enable            = ngbe_timesync_enable,
3084 	.timesync_disable           = ngbe_timesync_disable,
3085 	.timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp,
3086 	.timesync_read_tx_timestamp = ngbe_timesync_read_tx_timestamp,
3087 	.get_reg                    = ngbe_get_regs,
3088 	.get_eeprom_length          = ngbe_get_eeprom_length,
3089 	.get_eeprom                 = ngbe_get_eeprom,
3090 	.set_eeprom                 = ngbe_set_eeprom,
3091 	.timesync_adjust_time       = ngbe_timesync_adjust_time,
3092 	.timesync_read_time         = ngbe_timesync_read_time,
3093 	.timesync_write_time        = ngbe_timesync_write_time,
3094 	.tx_done_cleanup            = ngbe_dev_tx_done_cleanup,
3095 };
3096 
3097 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
3098 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
3099 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
3100 
3101 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
3102 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
3103 
3104 #ifdef RTE_ETHDEV_DEBUG_RX
3105 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
3106 #endif
3107 #ifdef RTE_ETHDEV_DEBUG_TX
3108 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
3109 #endif
3110