/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"
#include "ngbe_regs_group.h"

static const struct reg_info ngbe_regs_general[] = {
	{NGBE_RST, 1, 1, "NGBE_RST"},
	{NGBE_STAT, 1, 1, "NGBE_STAT"},
	{NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
	{NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
	{NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
	{NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_nvm[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_interrupt[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_fctl_others[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rxdma[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_rx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_tx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_wakeup[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_mac[] = {
	{0, 0, 0, ""}
};

static const struct reg_info ngbe_regs_diagnostic[] = {
	{0, 0, 0, ""},
};

/* PF registers */
static const struct reg_info *ngbe_regs_others[] = {
				ngbe_regs_general,
				ngbe_regs_nvm,
				ngbe_regs_interrupt,
				ngbe_regs_fctl_others,
				ngbe_regs_rxdma,
				ngbe_regs_rx,
				ngbe_regs_tx,
				ngbe_regs_wakeup,
				ngbe_regs_mac,
				ngbe_regs_diagnostic,
				NULL};

static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
					uint16_t queue);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);

#define NGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define NGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define NGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)
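
/*
 * Worked example, assuming 32-bit bitmap words (NBBY is 8 bits per byte):
 * queue 37 maps to idx = 37 / 32 = 1 and bit = 37 % 32 = 5, i.e. bit 5
 * of bitmap[1].
 */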

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_TXD_ALIGN,
	.nb_seg_max = NGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
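
/*
 * HW_XSTAT() exposes a counter under its field name; HW_XSTAT_NAME()
 * exposes the same field under an alternative name, as done for the
 * flow-control aliases at the end of the table below.
 */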
static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
	/* MNG RxTx */
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),
	/* Basic RxTx */
	HW_XSTAT(rx_packets),
	HW_XSTAT(tx_packets),
	HW_XSTAT(rx_bytes),
	HW_XSTAT(tx_bytes),
	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),

	/* Basic Error */
	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_errors),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	/* MACSEC */
	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	/* MAC RxTx */
	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	/* Flow Control */
	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
			   sizeof(rte_ngbe_stats_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
			   sizeof(rte_ngbe_qp_strings[0]))

static inline int32_t
ngbe_pf_reset_hw(struct ngbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	if (status == NGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}

static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	wr32(hw, NGBE_IENMISC, intr->mask_misc);
	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
	ngbe_flush(hw);
}

static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
	ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These locks are trickier since they are common to all ports; but
	 * swfw_sync retries for long enough (1s) that, if the lock cannot
	 * be taken, it is almost certainly due to an improperly held
	 * semaphore.
	 */
	mask = NGBE_MNGSEM_SWPHY |
	       NGBE_MNGSEM_SWMBX |
	       NGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	u32 led_conf = 0;
	int err, ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ngbe_eth_dev_ops;
	eth_dev->rx_queue_count       = ngbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * Rx and Tx function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ngbe_tx_queue *txq;
		/* The Tx function in the primary process was set by the last
		 * queue initialized; the Tx queues may not have been
		 * initialized by the primary process yet.
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			ngbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default Tx function if we get here */
			PMD_INIT_LOG(NOTICE,
				"No Tx queues configured yet. Using default Tx function.");
		}

		ngbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->back = pci_dev;
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->sub_system_id = pci_dev->id.subsystem_device_id;
	ngbe_map_device_id(hw);
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = ngbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ngbe_fc_full;
	hw->fc.current_mode = ngbe_fc_full;
	hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
	hw->fc.low_water = NGBE_FC_XON_LOTH;
	hw->fc.high_water = NGBE_FC_XOFF_HITH;
	hw->fc.send_xon = 1;

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, NULL);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->phy.led_oem_chk(hw, &led_conf);
	if (err == 0)
		hw->led_conf = led_conf;
	else
		hw->led_conf = 0xFFFF;

	err = hw->mac.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	ngbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ngbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ret = ngbe_pf_host_init(eth_dev);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		return ret;
	}

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			(int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ngbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ngbe_enable_intr(eth_dev);

	return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ngbe_dev_close(eth_dev);

	return 0;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct ngbe_adapter),
			eth_dev_pci_specific_init, pci_dev,
			eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (ethdev == NULL)
		return 0;

	return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
	.id_table = pci_id_ngbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ngbe_pci_probe,
	.remove = eth_ngbe_pci_remove,
};

static int
ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

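	/*
	 * The VFTA is an array of 128 32-bit words covering the 4096
	 * possible VLAN IDs: bits 11:5 of the VLAN ID select the word and
	 * bits 4:0 select the bit within it.
	 */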
	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
	vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, NGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

static void
ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	if (on)
		ngbe_vlan_hw_strip_enable(dev, queue);
	else
		ngbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			!(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg |= NGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & NGBE_RXCFG_ENA) &&
			(rxcfg & NGBE_RXCFG_VLAN);
		rxcfg &= ~NGBE_RXCFG_VLAN;
	}
	rxcfg &= ~NGBE_RXCFG_ENA;

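	/*
	 * Restart the ring only if it is currently enabled and the
	 * VLAN-strip bit actually changed: the queue is stopped, its base
	 * address and the new RXCFG value (with the enable bit cleared)
	 * are restored, and the queue is started again.
	 */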
	if (restart) {
		/* set vlan strip for ring */
		ngbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
		ngbe_dev_rx_queue_start(dev, queue);
	}
}

static int
ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, NGBE_PORTCTL);

	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
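	/*
	 * In QinQ mode the single TAGTPID(0) register appears to carry
	 * both TPIDs: the inner tag in the low 16 bits and the outer tag
	 * in the high 16 bits, matching the LSB/MSB updates below.
	 */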
	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR,
				"Inner type is not supported by single VLAN");
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_LSB_MASK,
				NGBE_TAGTPID_LSB(tpid));
		}
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16 bits are valid */
			wr32m(hw, NGBE_EXTAG,
				NGBE_EXTAG_VLAN_MASK,
				NGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, NGBE_VLANCTL,
				NGBE_VLANCTL_TPID_MASK,
				NGBE_VLANCTL_TPID(tpid));
			wr32m(hw, NGBE_DMATXCTRL,
				NGBE_DMATXCTRL_TPID_MASK,
				NGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq) {
			wr32m(hw, NGBE_TAGTPID(0),
				NGBE_TAGTPID_MSB_MASK,
				NGBE_TAGTPID_MSB(tpid));
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}

void
ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);
}

void
ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, NGBE_VLANCTL);
	vlnctrl &= ~NGBE_VLANCTL_CFIENA;
	vlnctrl |= NGBE_VLANCTL_VFE;
	wr32(hw, NGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < NGBE_VFTA_SIZE; i++)
		wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
	struct ngbe_rx_queue *rxq;

	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		NGBE_SET_HWSTRIP(hwstrip, queue);
	else
		NGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}
}

static void
ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl &= ~NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record this setting in the per-queue HW strip bitmap */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	ctrl |= NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	/* record this setting in the per-queue HW strip bitmap */
	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_VLANEXT;
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl  = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, NGBE_PORTCTL);
	ctrl &= ~NGBE_PORTCTL_QINQ;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

static void
ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl  = rd32(hw, NGBE_PORTCTL);
	ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
	wr32(hw, NGBE_PORTCTL, ctrl);
}

void
ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct ngbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			ngbe_vlan_hw_strip_enable(dev, i);
		else
			ngbe_vlan_hw_strip_disable(dev, i);
	}
}

void
ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct ngbe_rx_queue *rxq;

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}

static int
ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & RTE_ETH_VLAN_STRIP_MASK)
		ngbe_vlan_hw_strip_config(dev);

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			ngbe_vlan_hw_filter_enable(dev);
		else
			ngbe_vlan_hw_filter_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
			ngbe_vlan_hw_extend_enable(dev);
		else
			ngbe_vlan_hw_extend_disable(dev);
	}

	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
			ngbe_qinq_hw_strip_enable(dev);
		else
			ngbe_qinq_hw_strip_disable(dev);
	}

	return 0;
}

static int
ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	ngbe_config_vlan_strip_on_all_queues(dev, mask);

	ngbe_vlan_offload_config(dev, mask);

	return 0;
}

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	/* set flag to update link status after init */
	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue fails to meet the bulk
	 * allocation preconditions, it will be reset.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}

static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
	else
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

	intr->mask_misc |= NGBE_ICRMISC_GPIO;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = false;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* Stop the link setup handler before resetting the HW. */
	rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;

	/* reinitialize adapter, this calls reset and start */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = ngbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	ngbe_set_pcie_master(hw, true);

	/* configure PF module if SRIOV enabled */
	ngbe_pf_host_configure(dev);

	ngbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
						   dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate %d rx_queues intr_vec",
				     dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until Rx interrupt */
	ngbe_configure_msix(dev);

	/* initialize transmission unit */
	ngbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ngbe_dev_rx_init(dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
		goto error;
	}

	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
		RTE_ETH_VLAN_EXTEND_MASK;
	err = ngbe_vlan_offload_config(dev, mask);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	hw->mac.setup_pba(hw);
	ngbe_configure_port(dev);

	err = ngbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err != 0)
		goto error;
	dev->data->dev_link.link_status = link_up;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		negotiate = true;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err != 0)
		goto error;

	allowed_speeds = 0;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

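	/*
	 * RTE_ETH_LINK_SPEED_FIXED is bit 0 of link_speeds, so shifting
	 * both masks right by one compares only the speed bits.
	 */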
	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = hw->mac.default_speeds;
	} else {
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= NGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= NGBE_LINK_SPEED_100M_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
			speed |= NGBE_LINK_SPEED_10M_FULL;
	}

	err = hw->phy.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "PHY init failed");
		goto error;
	}
	err = hw->mac.setup_link(hw, speed, link_up);
	if (err != 0)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		ngbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
		ngbe_dev_macsec_interrupt_setup(dev);
		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     ngbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO,
				     "LSC won't enable because of no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		ngbe_dev_rxq_interrupt_setup(dev);

	/* enable UIO/VFIO intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since HW reset */
	ngbe_enable_intr(dev);

	if (hw->gpio_ctl) {
		/* GPIO 0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, 0);
	}

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	ngbe_dev_link_update(dev, 0);

	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	ngbe_dev_clear_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int vf;

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	rte_eal_alarm_cancel(ngbe_dev_setup_link_alarm_handler, dev);

	if (hw->gpio_ctl) {
		/* GPIO 0 is used for power on/off control */
		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
	}

	/* disable interrupts */
	ngbe_disable_intr(hw);

	/* reset the NIC */
	ngbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	ngbe_stop_hw(hw);

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	hw->phy.set_phy_power(hw, false);

	ngbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ngbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	ngbe_set_pcie_master(hw, true);

	adapter->rss_reta_updated = 0;

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ngbe_pf_reset_hw(hw);

	ngbe_dev_stop(dev);

	ngbe_dev_free_queues(dev);

	ngbe_set_pcie_master(hw, false);

	/* reprogram the RAR[0] in case user changed it. */
	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
				ngbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + NGBE_LINK_UP_TIME));

	/* uninitialize PF if max_vfs not zero */
	ngbe_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return ret;
}

/*
 * Reset PF device.
 */
static int
ngbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begins to reset a PF port, it should notify
	 * all of its VFs so they stay aligned with it. The detailed
	 * notification mechanism is PMD specific; for the ngbe PF it is
	 * rather complex. To avoid unexpected behavior in VFs, reset of a
	 * PF with SR-IOV activated is currently not supported. It might
	 * be supported later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_ngbe_dev_uninit(dev);
	if (ret != 0)
		return ret;

	ret = eth_ngbe_dev_init(dev, NULL);

	return ret;
}

#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
	{                                                       \
		uint32_t current_counter = rd32(hw, reg);       \
		if (current_counter < last_counter)             \
			current_counter += 0x100000000LL;       \
		if (!hw->offset_loaded)                         \
			last_counter = current_counter;         \
		counter = current_counter - last_counter;       \
		counter &= 0xFFFFFFFFLL;                        \
	}

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{                                                                \
		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
		uint64_t current_counter = (current_counter_msb << 32) | \
			current_counter_lsb;                             \
		if (current_counter < last_counter)                      \
			current_counter += 0x1000000000LL;               \
		if (!hw->offset_loaded)                                  \
			last_counter = current_counter;                  \
		counter = current_counter - last_counter;                \
		counter &= 0xFFFFFFFFFLL;                                \
	}
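
/*
 * Rollover example: if the previous snapshot of a 32-bit counter was
 * 0xFFFFFFF0 and the register now reads 0x00000010, the counter has
 * wrapped; compensating by 2^32 (2^36 for the 36-bit variant) before
 * subtracting yields the true delta of 0x20.
 */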

void
ngbe_read_stats_registers(struct ngbe_hw *hw,
			   struct ngbe_hw_stats *hw_stats)
{
	unsigned int i;

	/* QP Stats */
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
			hw->qp_last[i].rx_qp_packets,
			hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
			hw->qp_last[i].rx_qp_bytes,
			hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
			hw->qp_last[i].rx_qp_mc_packets,
			hw_stats->qp[i].rx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
			hw->qp_last[i].rx_qp_bc_packets,
			hw_stats->qp[i].rx_qp_bc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
			hw->qp_last[i].tx_qp_packets,
			hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
			hw->qp_last[i].tx_qp_bytes,
			hw_stats->qp[i].tx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
			hw->qp_last[i].tx_qp_mc_packets,
			hw_stats->qp[i].tx_qp_mc_packets);
		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
			hw->qp_last[i].tx_qp_bc_packets,
			hw_stats->qp[i].tx_qp_bc_packets);
	}

	/* PB Stats */
	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);

	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);

	/* DMA Stats */
	hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);

	/* MAC Stats */
	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, NGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, NGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
	hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);

	/* MNG Stats */
	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);

	/* MACsec Stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, NGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, NGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, NGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, NGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, NGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, NGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, NGBE_LSECRX_BADPKT(i));
	}
	for (i = 0; i < 4; i++) {
		hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
		hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
	}
	hw_stats->rx_total_missed_packets =
			hw_stats->rx_up_dropped;
}

static int
ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	struct ngbe_stat_mappings *stat_mappings =
			NGBE_DEV_STAT_MAPPINGS(dev);
	uint32_t i, j;

	ngbe_read_stats_registers(hw, hw_stats);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
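	/*
	 * Each 32-bit QSM register holds NB_QMAP_FIELDS_PER_QSM_REG 8-bit
	 * fields; the field for queue i selects which of the
	 * RTE_ETHDEV_QUEUE_STAT_CNTRS per-queue counters accumulates that
	 * queue's statistics.
	 */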
	for (i = 0; i < NGBE_MAX_QP; i++) {
		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
		uint32_t q_map;

		q_map = (stat_mappings->rqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

		q_map = (stat_mappings->tqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
	}

	/* Rx Errors */
	stats->imissed  = hw_stats->rx_total_missed_packets +
			  hw_stats->rx_dma_drop;
	stats->ierrors  = hw_stats->rx_crc_errors +
			  hw_stats->rx_mac_short_packet_dropped +
			  hw_stats->rx_length_errors +
			  hw_stats->rx_undersize_errors +
			  hw_stats->rx_oversize_errors +
			  hw_stats->rx_illegal_byte_errors +
			  hw_stats->rx_error_bytes +
			  hw_stats->rx_fragment_errors;

	/* Tx Errors */
	stats->oerrors  = 0;
	return 0;
}

static int
ngbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_dev_stats_get(dev, NULL);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

/* This function calculates the number of xstats based on the current config */
static unsigned
ngbe_xstats_calc_num(struct rte_eth_dev *dev)
{
	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	return NGBE_NB_HW_STATS +
	       NGBE_NB_QP_STATS * nb_queues;
}
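
/*
 * xstats ids are laid out as the NGBE_NB_HW_STATS device-wide counters
 * first, followed by NGBE_NB_QP_STATS counters per queue; e.g. the
 * second per-queue counter of queue 2 has id
 * NGBE_NB_HW_STATS + 2 * NGBE_NB_QP_STATS + 1.
 */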

static inline int
ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		snprintf(name, size, "[hw]%s",
			rte_ngbe_stats_strings[id].name);
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		snprintf(name, size, "[q%u]%s", nb,
			rte_ngbe_qp_strings[st].name);
		return 0;
	}
	id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;

	return -(int)(id + 1);
}

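/*
 * The per-queue offset below uses a stride of
 * NGBE_NB_QP_STATS * sizeof(uint64_t) bytes; note this assumes each
 * qp[] entry of struct ngbe_hw_stats holds exactly NGBE_NB_QP_STATS
 * consecutive uint64_t counters.
 */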
static inline int
ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
{
	int nb, st;

	/* Extended stats from ngbe_hw_stats */
	if (id < NGBE_NB_HW_STATS) {
		*offset = rte_ngbe_stats_strings[id].offset;
		return 0;
	}
	id -= NGBE_NB_HW_STATS;

	/* Queue Stats */
	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
		nb = id / NGBE_NB_QP_STATS;
		st = id % NGBE_NB_QP_STATS;
		*offset = rte_ngbe_qp_strings[st].offset +
			nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
		return 0;
	}

	return -1;
}

static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
{
	unsigned int i, count;

	count = ngbe_xstats_calc_num(dev);
	if (xstats_names == NULL)
		return count;

	/* Note: limit >= cnt_stats checked upstream
	 * in rte_eth_xstats_names()
	 */
	limit = min(limit, count);

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(i, xstats_names[i].name,
			sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
	}

	return i;
}

static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
	const uint64_t *ids,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int limit)
{
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
				sizeof(xstats_names[i].name))) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			return -1;
		}
	}

	return i;
}

static int
ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
					 unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, xstats is NULL and we have already cleared
	 * the registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (xstats == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset = 0;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
		xstats[i].id = i;
	}

	return i;
}

static int
ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
					 unsigned int limit)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i, count;

	ngbe_read_stats_registers(hw, hw_stats);

	/* If this is a reset, values is NULL and we have already cleared
	 * the registers by reading them.
	 */
	count = ngbe_xstats_calc_num(dev);
	if (values == NULL)
		return count;

	limit = min(limit, ngbe_xstats_calc_num(dev));

	/* Extended stats from ngbe_hw_stats */
	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(i, &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}

static int
ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int limit)
{
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
	unsigned int i;

	if (ids == NULL)
		return ngbe_dev_xstats_get_(dev, values, limit);

	for (i = 0; i < limit; i++) {
		uint32_t offset;

		if (ngbe_get_offset_by_id(ids[i], &offset)) {
			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
			break;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
	}

	return i;
}

static int
ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	ngbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

static int
ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret;

	ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);

	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;

	return 0;
}

static int
ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = 0;
	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
				RTE_ETH_LINK_SPEED_10M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
}

const uint32_t *
ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
		return ngbe_get_supported_ptypes();

	return NULL;
}

void
ngbe_dev_setup_link_alarm_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	u32 speed;
	bool autoneg = false;

	speed = hw->phy.autoneg_advertised;
	if (!speed)
		hw->mac.get_link_capabilities(hw, &speed, &autoneg);

	hw->mac.setup_link(hw, speed, true);

	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
}

1843 /* Return 0 if the link status changed, -1 if it did not */
1844 int
1845 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1846 			    int wait_to_complete)
1847 {
1848 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1849 	struct rte_eth_link link;
1850 	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1851 	u32 lan_speed = 0;
1852 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
1853 	bool link_up;
1854 	int err;
1855 	int wait = 1;
1856 
1857 	memset(&link, 0, sizeof(link));
1858 	link.link_status = RTE_ETH_LINK_DOWN;
1859 	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1860 	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1861 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1862 			~RTE_ETH_LINK_SPEED_AUTONEG);
1863 
1864 	hw->mac.get_link_status = true;
1865 
1866 	if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
1867 		return rte_eth_linkstatus_set(dev, &link);
1868 
1869 	/* check if it needs to wait to complete, if lsc interrupt is enabled */
1870 	/* Don't wait for completion if it wasn't requested or if the LSC interrupt is enabled */
1871 		wait = 0;
1872 
1873 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1874 	if (err != 0) {
1875 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1876 		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1877 		return rte_eth_linkstatus_set(dev, &link);
1878 	}
1879 
1880 	if (!link_up)
1881 		return rte_eth_linkstatus_set(dev, &link);
1882 
1883 	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
1884 	link.link_status = RTE_ETH_LINK_UP;
1885 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1886 
1887 	switch (link_speed) {
1888 	default:
1889 	case NGBE_LINK_SPEED_UNKNOWN:
1890 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1891 		break;
1892 
1893 	case NGBE_LINK_SPEED_10M_FULL:
1894 		link.link_speed = RTE_ETH_SPEED_NUM_10M;
1895 		lan_speed = 0;
1896 		break;
1897 
1898 	case NGBE_LINK_SPEED_100M_FULL:
1899 		link.link_speed = RTE_ETH_SPEED_NUM_100M;
1900 		lan_speed = 1;
1901 		break;
1902 
1903 	case NGBE_LINK_SPEED_1GB_FULL:
1904 		link.link_speed = RTE_ETH_SPEED_NUM_1G;
1905 		lan_speed = 2;
1906 		break;
1907 	}
1908 
1909 	if (hw->is_pf) {
1910 		wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1911 		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1912 				NGBE_LINK_SPEED_100M_FULL |
1913 				NGBE_LINK_SPEED_10M_FULL)) {
1914 			wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1915 				NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1916 		}
1917 	}
1918 
1919 	return rte_eth_linkstatus_set(dev, &link);
1920 }
1921 
1922 static int
1923 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1924 {
1925 	return ngbe_dev_link_update_share(dev, wait_to_complete);
1926 }
1927 
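/*
 * Illustrative usage sketch (not part of the driver): polling the link
 * through the generic API, which reaches ngbe_dev_link_update() with
 * wait_to_complete = 0 (the _nowait variant) or 1. "port_id" is a
 * placeholder.
 *
 *   #include <stdio.h>
 *   #include <rte_ethdev.h>
 *
 *   struct rte_eth_link link;
 *   if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
 *       link.link_status == RTE_ETH_LINK_UP)
 *           printf("link up at %s\n",
 *                  rte_eth_link_speed_to_str(link.link_speed));
 */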
1928 static int
1929 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1930 {
1931 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1932 	uint32_t fctrl;
1933 
1934 	fctrl = rd32(hw, NGBE_PSRCTL);
1935 	fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
1936 	wr32(hw, NGBE_PSRCTL, fctrl);
1937 
1938 	return 0;
1939 }
1940 
1941 static int
1942 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
1943 {
1944 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1945 	uint32_t fctrl;
1946 
1947 	fctrl = rd32(hw, NGBE_PSRCTL);
1948 	fctrl &= (~NGBE_PSRCTL_UCP);
1949 	if (dev->data->all_multicast == 1)
1950 		fctrl |= NGBE_PSRCTL_MCP;
1951 	else
1952 		fctrl &= (~NGBE_PSRCTL_MCP);
1953 	wr32(hw, NGBE_PSRCTL, fctrl);
1954 
1955 	return 0;
1956 }
1957 
1958 static int
1959 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
1960 {
1961 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1962 	uint32_t fctrl;
1963 
1964 	fctrl = rd32(hw, NGBE_PSRCTL);
1965 	fctrl |= NGBE_PSRCTL_MCP;
1966 	wr32(hw, NGBE_PSRCTL, fctrl);
1967 
1968 	return 0;
1969 }
1970 
1971 static int
1972 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
1973 {
1974 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1975 	uint32_t fctrl;
1976 
1977 	if (dev->data->promiscuous == 1)
1978 		return 0; /* must remain in all_multicast mode */
1979 
1980 	fctrl = rd32(hw, NGBE_PSRCTL);
1981 	fctrl &= (~NGBE_PSRCTL_MCP);
1982 	wr32(hw, NGBE_PSRCTL, fctrl);
1983 
1984 	return 0;
1985 }
1986 
1987 /**
1988  * It clears the interrupt causes and enables the interrupt.
1989  * It is called only once, during NIC initialization.
1990  *
1991  * @param dev
1992  *  Pointer to struct rte_eth_dev.
1993  * @param on
1994  *  Enable or Disable.
1995  *
1996  * @return
1997  *  - On success, zero.
1998  *  - On failure, a negative value.
1999  */
2000 static int
2001 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2002 {
2003 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2004 
2005 	ngbe_dev_link_status_print(dev);
2006 	if (on != 0) {
2007 		intr->mask_misc |= NGBE_ICRMISC_PHY;
2008 		intr->mask_misc |= NGBE_ICRMISC_GPIO;
2009 	} else {
2010 		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2011 		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
2012 	}
2013 
2014 	return 0;
2015 }
2016 
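/*
 * Illustrative sketch (not part of the driver): the mask bits set above
 * only matter once the application enables LSC interrupts and registers
 * a callback. The callback body and "port_id" are hypothetical.
 *
 *   #include <stdio.h>
 *   #include <rte_ethdev.h>
 *
 *   static int
 *   lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *                void *cb_arg, void *ret_param)
 *   {
 *           RTE_SET_USED(cb_arg);
 *           RTE_SET_USED(ret_param);
 *           printf("port %u: event %d (link status change)\n",
 *                  port_id, event);
 *           return 0;
 *   }
 *
 *   // with dev_conf.intr_conf.lsc = 1 set before rte_eth_dev_configure():
 *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                 lsc_event_cb, NULL);
 */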
2017 /**
2018  * It clears the interrupt causes and enables the interrupt.
2019  * It is called only once, during NIC initialization.
2020  *
2021  * @param dev
2022  *  Pointer to struct rte_eth_dev.
2023  *
2024  * @return
2025  *  - On success, zero.
2026  *  - On failure, a negative value.
2027  */
2028 static int
2029 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
2030 {
2031 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2032 	u64 mask;
2033 
2034 	mask = NGBE_ICR_MASK;
2035 	mask &= (1ULL << NGBE_MISC_VEC_ID);
2036 	intr->mask |= mask;
2037 	intr->mask_misc |= NGBE_ICRMISC_GPIO;
2038 
2039 	return 0;
2040 }
2041 
2042 /**
2043  * It clears the interrupt causes and enables the interrupt.
2044  * It is called only once, during NIC initialization.
2045  *
2046  * @param dev
2047  *  Pointer to struct rte_eth_dev.
2048  *
2049  * @return
2050  *  - On success, zero.
2051  *  - On failure, a negative value.
2052  */
2053 static int
2054 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2055 {
2056 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2057 	u64 mask;
2058 
2059 	mask = NGBE_ICR_MASK;
2060 	mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
2061 	intr->mask |= mask;
2062 
2063 	return 0;
2064 }
2065 
2066 /**
2067  * It clears the interrupt causes and enables the interrupt.
2068  * It is called only once, during NIC initialization.
2069  *
2070  * @param dev
2071  *  Pointer to struct rte_eth_dev.
2072  *
2073  * @return
2074  *  - On success, zero.
2075  *  - On failure, a negative value.
2076  */
2077 static int
2078 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2079 {
2080 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2081 
2082 	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
2083 
2084 	return 0;
2085 }
2086 
2087 /*
2088  * It reads the ICR and sets the flags for link_update.
2089  *
2090  * @param dev
2091  *  Pointer to struct rte_eth_dev.
2092  *
2093  * @return
2094  *  - On success, zero.
2095  *  - On failure, a negative value.
2096  */
2097 static int
2098 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2099 {
2100 	uint32_t eicr;
2101 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2102 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2103 
2104 	/* read the read-on-clear NIC registers here */
2105 	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2106 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2107 
2108 	intr->flags = 0;
2109 
2110 	/* set flag for async link update */
2111 	if (eicr & NGBE_ICRMISC_PHY)
2112 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2113 
2114 	if (eicr & NGBE_ICRMISC_VFMBX)
2115 		intr->flags |= NGBE_FLAG_MAILBOX;
2116 
2117 	if (eicr & NGBE_ICRMISC_LNKSEC)
2118 		intr->flags |= NGBE_FLAG_MACSEC;
2119 
2120 	if (eicr & NGBE_ICRMISC_GPIO)
2121 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2122 
2123 	((u32 *)hw->isb_mem)[NGBE_ISB_MISC] = 0;
2124 
2125 	return 0;
2126 }
2127 
2128 /**
2129  * It gets and then prints the link status.
2130  *
2131  * @param dev
2132  *  Pointer to struct rte_eth_dev.
2133  */
2138 static void
2139 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
2140 {
2141 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2142 	struct rte_eth_link link;
2143 
2144 	rte_eth_linkstatus_get(dev, &link);
2145 
2146 	if (link.link_status == RTE_ETH_LINK_UP) {
2147 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2148 					(int)(dev->data->port_id),
2149 					(unsigned int)link.link_speed,
2150 			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2151 					"full-duplex" : "half-duplex");
2152 	} else {
2153 		PMD_INIT_LOG(INFO, "Port %d: Link Down",
2154 				(int)(dev->data->port_id));
2155 	}
2156 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2157 				pci_dev->addr.domain,
2158 				pci_dev->addr.bus,
2159 				pci_dev->addr.devid,
2160 				pci_dev->addr.function);
2161 }
2162 
2163 /*
2164  * It executes link_update after an interrupt has occurred.
2165  *
2166  * @param dev
2167  *  Pointer to struct rte_eth_dev.
2168  *
2169  * @return
2170  *  - On success, zero.
2171  *  - On failure, a negative value.
2172  */
2173 static int
2174 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2175 {
2176 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2177 
2178 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2179 
2180 	if (intr->flags & NGBE_FLAG_MAILBOX) {
2181 		ngbe_pf_mbx_process(dev);
2182 		intr->flags &= ~NGBE_FLAG_MAILBOX;
2183 	}
2184 
2185 	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2186 		struct rte_eth_link link;
2187 
2188 		/* get the link status before the update, to detect a speed change later */
2189 		rte_eth_linkstatus_get(dev, &link);
2190 
2191 		ngbe_dev_link_update(dev, 0);
2192 		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2193 		ngbe_dev_link_status_print(dev);
2194 		if (dev->data->dev_link.link_speed != link.link_speed)
2195 			rte_eth_dev_callback_process(dev,
2196 				RTE_ETH_EVENT_INTR_LSC, NULL);
2197 	}
2198 
2199 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
2200 	ngbe_enable_intr(dev);
2201 
2202 	return 0;
2203 }
2204 
2205 /**
2206  * Interrupt handler triggered by the NIC for handling a
2207  * specific interrupt.
2208  *
2209  * @param param
2210  *  The address of parameter (struct rte_eth_dev *) registered before.
2211  */
2212 static void
2213 ngbe_dev_interrupt_handler(void *param)
2214 {
2215 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2216 
2217 	ngbe_dev_interrupt_get_status(dev);
2218 	ngbe_dev_interrupt_action(dev);
2219 }
2220 
2221 static int
2222 ngbe_dev_led_on(struct rte_eth_dev *dev)
2223 {
2224 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2225 	return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP;
2226 }
2227 
2228 static int
2229 ngbe_dev_led_off(struct rte_eth_dev *dev)
2230 {
2231 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2232 	return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP;
2233 }
2234 
2235 static int
2236 ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2237 {
2238 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2239 	uint32_t mflcn_reg;
2240 	uint32_t fccfg_reg;
2241 	int rx_pause;
2242 	int tx_pause;
2243 
2244 	fc_conf->pause_time = hw->fc.pause_time;
2245 	fc_conf->high_water = hw->fc.high_water;
2246 	fc_conf->low_water = hw->fc.low_water;
2247 	fc_conf->send_xon = hw->fc.send_xon;
2248 	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2249 
2250 	/*
2251 	 * Return rx_pause status according to actual setting of
2252 	 * RXFCCFG register.
2253 	 */
2254 	mflcn_reg = rd32(hw, NGBE_RXFCCFG);
2255 	if (mflcn_reg & NGBE_RXFCCFG_FC)
2256 		rx_pause = 1;
2257 	else
2258 		rx_pause = 0;
2259 
2260 	/*
2261 	 * Return tx_pause status according to actual setting of
2262 	 * TXFCCFG register.
2263 	 */
2264 	fccfg_reg = rd32(hw, NGBE_TXFCCFG);
2265 	if (fccfg_reg & NGBE_TXFCCFG_FC)
2266 		tx_pause = 1;
2267 	else
2268 		tx_pause = 0;
2269 
2270 	if (rx_pause && tx_pause)
2271 		fc_conf->mode = RTE_ETH_FC_FULL;
2272 	else if (rx_pause)
2273 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2274 	else if (tx_pause)
2275 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2276 	else
2277 		fc_conf->mode = RTE_ETH_FC_NONE;
2278 
2279 	return 0;
2280 }
2281 
2282 static int
2283 ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2284 {
2285 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2286 	int err;
2287 	uint32_t rx_buf_size;
2288 	uint32_t max_high_water;
2289 	enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = {
2290 		ngbe_fc_none,
2291 		ngbe_fc_rx_pause,
2292 		ngbe_fc_tx_pause,
2293 		ngbe_fc_full
2294 	};
2295 
2296 	PMD_INIT_FUNC_TRACE();
2297 
2298 	rx_buf_size = rd32(hw, NGBE_PBRXSIZE);
2299 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2300 
2301 	/*
2302 	 * Reserve at least one Ethernet frame for the high/low water marks;
2303 	 * high_water/low_water are expressed in kilobytes for ngbe.
2304 	 */
2305 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
2306 	if (fc_conf->high_water > max_high_water ||
2307 	    fc_conf->high_water < fc_conf->low_water) {
2308 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2309 		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
2310 		return -EINVAL;
2311 	}
2312 
2313 	hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode];
2314 	hw->fc.pause_time     = fc_conf->pause_time;
2315 	hw->fc.high_water     = fc_conf->high_water;
2316 	hw->fc.low_water      = fc_conf->low_water;
2317 	hw->fc.send_xon       = fc_conf->send_xon;
2318 	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
2319 
2320 	err = hw->mac.fc_enable(hw);
2321 
2322 	/* Not negotiated is not an error case */
2323 	if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) {
2324 		wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK,
2325 		      (fc_conf->mac_ctrl_frame_fwd
2326 		       ? NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP));
2327 		ngbe_flush(hw);
2328 
2329 		return 0;
2330 	}
2331 
2332 	PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err);
2333 	return -EIO;
2334 }
2335 
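/*
 * Illustrative usage sketch (not part of the driver): configuring full
 * flow control through the generic API. The watermark values are
 * hypothetical and must pass the high/low water checks above.
 *
 *   #include <stdio.h>
 *   #include <rte_ethdev.h>
 *
 *   struct rte_eth_fc_conf fc = {
 *           .mode = RTE_ETH_FC_FULL,
 *           .high_water = 0x80,	// in KB, <= max_high_water
 *           .low_water = 0x40,	// in KB, <= high_water
 *           .pause_time = 0x680,
 *           .send_xon = 1,
 *           .autoneg = 1,
 *   };
 *   if (rte_eth_dev_flow_ctrl_set(port_id, &fc) != 0)
 *           printf("flow control setup failed\n");
 */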
2336 int
2337 ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2338 			  struct rte_eth_rss_reta_entry64 *reta_conf,
2339 			  uint16_t reta_size)
2340 {
2341 	uint8_t i, j, mask;
2342 	uint32_t reta;
2343 	uint16_t idx, shift;
2344 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2345 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2346 
2347 	PMD_INIT_FUNC_TRACE();
2348 
2349 	if (!hw->is_pf) {
2350 		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
2351 			"NIC.");
2352 		return -ENOTSUP;
2353 	}
2354 
2355 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2356 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2357 			"(%d) doesn't match the number supported by hardware "
2358 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2359 		return -EINVAL;
2360 	}
2361 
2362 	for (i = 0; i < reta_size; i += 4) {
2363 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2364 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2365 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2366 		if (!mask)
2367 			continue;
2368 
2369 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2370 		for (j = 0; j < 4; j++) {
2371 			if (RS8(mask, j, 0x1)) {
2372 				reta  &= ~(MS32(8 * j, 0xFF));
2373 				reta |= LS32(reta_conf[idx].reta[shift + j],
2374 						8 * j, 0xFF);
2375 			}
2376 		}
2377 		wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2378 	}
2379 	adapter->rss_reta_updated = 1;
2380 
2381 	return 0;
2382 }
2383 
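/*
 * Illustrative usage sketch (not part of the driver): spreading the
 * 128-entry redirection table across two Rx queues via the generic API.
 * The 128 entries span two rte_eth_rss_reta_entry64 groups of
 * RTE_ETH_RETA_GROUP_SIZE (64) entries each.
 *
 *   #include <string.h>
 *   #include <rte_ethdev.h>
 *
 *   struct rte_eth_rss_reta_entry64 reta[2];
 *   unsigned int i;
 *
 *   memset(reta, 0, sizeof(reta));
 *   for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
 *           reta[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
 *                   1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
 *           reta[i / RTE_ETH_RETA_GROUP_SIZE].reta
 *                   [i % RTE_ETH_RETA_GROUP_SIZE] = i % 2;	// queue 0/1
 *   }
 *   rte_eth_dev_rss_reta_update(port_id, reta, RTE_ETH_RSS_RETA_SIZE_128);
 */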
2384 int
2385 ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2386 			 struct rte_eth_rss_reta_entry64 *reta_conf,
2387 			 uint16_t reta_size)
2388 {
2389 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2390 	uint8_t i, j, mask;
2391 	uint32_t reta;
2392 	uint16_t idx, shift;
2393 
2394 	PMD_INIT_FUNC_TRACE();
2395 
2396 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2397 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
2398 			"(%d) doesn't match the number supported by hardware "
2399 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2400 		return -EINVAL;
2401 	}
2402 
2403 	for (i = 0; i < reta_size; i += 4) {
2404 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2405 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2406 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2407 		if (!mask)
2408 			continue;
2409 
2410 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2411 		for (j = 0; j < 4; j++) {
2412 			if (RS8(mask, j, 0x1))
2413 				reta_conf[idx].reta[shift + j] =
2414 					(uint16_t)RS32(reta, 8 * j, 0xFF);
2415 		}
2416 	}
2417 
2418 	return 0;
2419 }
2420 
2421 static int
2422 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2423 				uint32_t index, uint32_t pool)
2424 {
2425 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2426 	uint32_t enable_addr = 1;
2427 
2428 	return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2429 			     pool, enable_addr);
2430 }
2431 
2432 static void
2433 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2434 {
2435 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2436 
2437 	ngbe_clear_rar(hw, index);
2438 }
2439 
2440 static int
2441 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2442 {
2443 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2444 
2445 	ngbe_remove_rar(dev, 0);
2446 	ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2447 
2448 	return 0;
2449 }
2450 
2451 static int
2452 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2453 {
2454 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2455 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
2456 	struct rte_eth_dev_data *dev_data = dev->data;
2457 
2458 	/* If the device is started, refuse an MTU that requires scattered
2459 	 * Rx support when that feature has not already been enabled.
2460 	 */
2461 	if (dev_data->dev_started && !dev_data->scattered_rx &&
2462 	    (frame_size + 2 * RTE_VLAN_HLEN >
2463 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2464 		PMD_INIT_LOG(ERR, "Stop port first.");
2465 		return -EINVAL;
2466 	}
2467 
2468 	if (hw->mode)
2469 		wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2470 			NGBE_FRAME_SIZE_MAX);
2471 	else
2472 		wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2473 			NGBE_FRMSZ_MAX(frame_size));
2474 
2475 	return 0;
2476 }
2477 
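/*
 * Illustrative usage sketch (not part of the driver): the frame size
 * derived above is mtu + 18 (Ethernet header + CRC) + 4 more bytes,
 * presumably allowing for a VLAN tag. A 9000-byte MTU therefore
 * programs a 9022-byte frame limit:
 *
 *   #include <stdio.h>
 *   #include <rte_ethdev.h>
 *
 *   int ret = rte_eth_dev_set_mtu(port_id, 9000);
 *   if (ret != 0)
 *           printf("MTU rejected (%d); stop the port first?\n", ret);
 */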
2478 static uint32_t
2479 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2480 {
2481 	uint32_t vector = 0;
2482 
2483 	switch (hw->mac.mc_filter_type) {
2484 	case 0:   /* use bits [47:36] of the address */
2485 		vector = ((uc_addr->addr_bytes[4] >> 4) |
2486 			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
2487 		break;
2488 	case 1:   /* use bits [46:35] of the address */
2489 		vector = ((uc_addr->addr_bytes[4] >> 3) |
2490 			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
2491 		break;
2492 	case 2:   /* use bits [45:34] of the address */
2493 		vector = ((uc_addr->addr_bytes[4] >> 2) |
2494 			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
2495 		break;
2496 	case 3:   /* use bits [43:32] of the address */
2497 		vector = ((uc_addr->addr_bytes[4]) |
2498 			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
2499 		break;
2500 	default:  /* Invalid mc_filter_type */
2501 		break;
2502 	}
2503 
2504 	/* the vector is only 12 bits wide; mask it to stay within the table */
2505 	vector &= 0xFFF;
2506 	return vector;
2507 }
2508 
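/*
 * Worked example (informational): with mc_filter_type 0, the hash takes
 * bits [47:36] of the address. For a MAC ending in AB:CD,
 * addr_bytes[4] = 0xAB and addr_bytes[5] = 0xCD, so
 *
 *   vector = (0xAB >> 4) | (0xCD << 4) = 0x0A | 0xCD0 = 0xCDA
 *
 * and in ngbe_uc_hash_table_set() below this selects
 *
 *   uta_idx = (0xCDA >> 5) & 0x7F = 0x66	(table word 102)
 *   bit     =  0xCDA & 0x1F       = 26	(bit within that word)
 */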
2509 static int
2510 ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2511 			struct rte_ether_addr *mac_addr, uint8_t on)
2512 {
2513 	uint32_t vector;
2514 	uint32_t uta_idx;
2515 	uint32_t reg_val;
2516 	uint32_t uta_mask;
2517 	uint32_t psrctl;
2518 
2519 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2520 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2521 
2522 	vector = ngbe_uta_vector(hw, mac_addr);
2523 	uta_idx = (vector >> 5) & 0x7F;
2524 	uta_mask = 0x1UL << (vector & 0x1F);
2525 
2526 	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2527 		return 0;
2528 
2529 	reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2530 	if (on) {
2531 		uta_info->uta_in_use++;
2532 		reg_val |= uta_mask;
2533 		uta_info->uta_shadow[uta_idx] |= uta_mask;
2534 	} else {
2535 		uta_info->uta_in_use--;
2536 		reg_val &= ~uta_mask;
2537 		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2538 	}
2539 
2540 	wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2541 
2542 	psrctl = rd32(hw, NGBE_PSRCTL);
2543 	if (uta_info->uta_in_use > 0)
2544 		psrctl |= NGBE_PSRCTL_UCHFENA;
2545 	else
2546 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2547 
2548 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2549 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2550 	wr32(hw, NGBE_PSRCTL, psrctl);
2551 
2552 	return 0;
2553 }
2554 
2555 static int
2556 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2557 {
2558 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2559 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2560 	uint32_t psrctl;
2561 	int i;
2562 
2563 	if (on) {
2564 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2565 			uta_info->uta_shadow[i] = ~0;
2566 			wr32(hw, NGBE_UCADDRTBL(i), ~0);
2567 		}
2568 	} else {
2569 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2570 			uta_info->uta_shadow[i] = 0;
2571 			wr32(hw, NGBE_UCADDRTBL(i), 0);
2572 		}
2573 	}
2574 
2575 	psrctl = rd32(hw, NGBE_PSRCTL);
2576 	if (on)
2577 		psrctl |= NGBE_PSRCTL_UCHFENA;
2578 	else
2579 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2580 
2581 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2582 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2583 	wr32(hw, NGBE_PSRCTL, psrctl);
2584 
2585 	return 0;
2586 }
2587 
2588 /**
2589  * Set the IVAR registers, mapping interrupt causes to vectors
2590  * @param hw
2591  *  pointer to ngbe_hw struct
2592  * @param direction
2593  *  0 for Rx, 1 for Tx, -1 for other causes
2594  * @param queue
2595  *  queue to map the corresponding interrupt to
2596  * @param msix_vector
2597  *  the vector to map to the corresponding queue
2598  */
2599 void
2600 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2601 		   uint8_t queue, uint8_t msix_vector)
2602 {
2603 	uint32_t tmp, idx;
2604 
2605 	if (direction == -1) {
2606 		/* other causes */
2607 		msix_vector |= NGBE_IVARMISC_VLD;
2608 		idx = 0;
2609 		tmp = rd32(hw, NGBE_IVARMISC);
2610 		tmp &= ~(0xFF << idx);
2611 		tmp |= (msix_vector << idx);
2612 		wr32(hw, NGBE_IVARMISC, tmp);
2613 	} else {
2614 		/* rx or tx causes */
2615 		/* Workaround for ICR lost */
2616 		idx = ((16 * (queue & 1)) + (8 * direction));
2617 		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2618 		tmp &= ~(0xFF << idx);
2619 		tmp |= (msix_vector << idx);
2620 		wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2621 	}
2622 }
2623 
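/*
 * Worked example (informational): each NGBE_IVAR register holds four
 * 8-bit cause entries, two queues per register, with Rx entries at bits
 * [7:0] / [23:16] and Tx entries at bits [15:8] / [31:24]. For example:
 *
 *   Rx queue 3: idx = 16 * (3 & 1) + 8 * 0 = 16 -> NGBE_IVAR(1), bits [23:16]
 *   Tx queue 2: idx = 16 * (2 & 1) + 8 * 1 =  8 -> NGBE_IVAR(1), bits [15:8]
 */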
2624 /**
2625  * Sets up the hardware to properly generate MSI-X interrupts
2626  * @param dev
2627  *  pointer to the rte_eth_dev structure
2628  */
2629 static void
2630 ngbe_configure_msix(struct rte_eth_dev *dev)
2631 {
2632 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2633 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2634 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2635 	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2636 	uint32_t vec = NGBE_MISC_VEC_ID;
2637 	uint32_t gpie;
2638 
2639 	/*
2640 	 * Don't configure the MSI-X registers if no mapping is done
2641 	 * between interrupt vectors and event fds. But if MSI-X has
2642 	 * already been enabled, auto clean, auto mask and throttling
2643 	 * still need to be configured.
2644 	 */
2645 	gpie = rd32(hw, NGBE_GPIE);
2646 	if (!rte_intr_dp_is_en(intr_handle) &&
2647 	    !(gpie & NGBE_GPIE_MSIX))
2648 		return;
2649 
2650 	if (rte_intr_allow_others(intr_handle)) {
2651 		base = NGBE_RX_VEC_START;
2652 		vec = base;
2653 	}
2654 
2655 	/* setup GPIE for MSI-X mode */
2656 	gpie = rd32(hw, NGBE_GPIE);
2657 	gpie |= NGBE_GPIE_MSIX;
2658 	wr32(hw, NGBE_GPIE, gpie);
2659 
2660 	/* Populate the IVAR table and set the ITR values to the
2661 	 * corresponding register.
2662 	 */
2663 	if (rte_intr_dp_is_en(intr_handle)) {
2664 		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2665 			queue_id++) {
2666 			/* by default, 1:1 mapping */
2667 			ngbe_set_ivar_map(hw, 0, queue_id, vec);
2668 			rte_intr_vec_list_index_set(intr_handle,
2669 							   queue_id, vec);
2670 			if (vec < base + rte_intr_nb_efd_get(intr_handle)
2671 			    - 1)
2672 				vec++;
2673 		}
2674 
2675 		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2676 	}
2677 	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2678 			NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2679 			| NGBE_ITR_WRDSA);
2680 }
2681 
2682 static u8 *
2683 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2684 			u8 **mc_addr_ptr, u32 *vmdq)
2685 {
2686 	u8 *mc_addr;
2687 
2688 	*vmdq = 0;
2689 	mc_addr = *mc_addr_ptr;
2690 	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2691 	return mc_addr;
2692 }
2693 
2694 int
2695 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2696 			  struct rte_ether_addr *mc_addr_set,
2697 			  uint32_t nb_mc_addr)
2698 {
2699 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2700 	u8 *mc_addr_list;
2701 
2702 	mc_addr_list = (u8 *)mc_addr_set;
2703 	return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2704 					 ngbe_dev_addr_list_itr, TRUE);
2705 }
2706 
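/*
 * Illustrative usage sketch (not part of the driver): installing a
 * multicast filter list through the generic API. The addresses are
 * placeholders.
 *
 *   #include <rte_ethdev.h>
 *
 *   struct rte_ether_addr mc[2] = {
 *           {{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }},
 *           {{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb }},
 *   };
 *   rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
 */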
2707 static uint64_t
2708 ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
2709 {
2710 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2711 	uint64_t systime_cycles;
2712 
2713 	systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL);
2714 	systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32;
2715 
2716 	return systime_cycles;
2717 }
2718 
2719 static uint64_t
2720 ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2721 {
2722 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2723 	uint64_t rx_tstamp_cycles;
2724 
2725 	/* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
2726 	rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL);
2727 	rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32;
2728 
2729 	return rx_tstamp_cycles;
2730 }
2731 
2732 static uint64_t
2733 ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2734 {
2735 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2736 	uint64_t tx_tstamp_cycles;
2737 
2738 	/* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
2739 	tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL);
2740 	tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32;
2741 
2742 	return tx_tstamp_cycles;
2743 }
2744 
2745 static void
2746 ngbe_start_timecounters(struct rte_eth_dev *dev)
2747 {
2748 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2749 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2750 	uint32_t incval = 0;
2751 	uint32_t shift = 0;
2752 
2753 	incval = NGBE_INCVAL_1GB;
2754 	shift = NGBE_INCVAL_SHIFT_1GB;
2755 
2756 	wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval));
2757 
2758 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
2759 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2760 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2761 
2762 	adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2763 	adapter->systime_tc.cc_shift = shift;
2764 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
2765 
2766 	adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2767 	adapter->rx_tstamp_tc.cc_shift = shift;
2768 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2769 
2770 	adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2771 	adapter->tx_tstamp_tc.cc_shift = shift;
2772 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2773 }
2774 
2775 static int
2776 ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
2777 {
2778 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2779 
2780 	adapter->systime_tc.nsec += delta;
2781 	adapter->rx_tstamp_tc.nsec += delta;
2782 	adapter->tx_tstamp_tc.nsec += delta;
2783 
2784 	return 0;
2785 }
2786 
2787 static int
2788 ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
2789 {
2790 	uint64_t ns;
2791 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2792 
2793 	ns = rte_timespec_to_ns(ts);
2794 	/* Set the timecounters to a new value. */
2795 	adapter->systime_tc.nsec = ns;
2796 	adapter->rx_tstamp_tc.nsec = ns;
2797 	adapter->tx_tstamp_tc.nsec = ns;
2798 
2799 	return 0;
2800 }
2801 
2802 static int
2803 ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
2804 {
2805 	uint64_t ns, systime_cycles;
2806 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2807 
2808 	systime_cycles = ngbe_read_systime_cyclecounter(dev);
2809 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
2810 	*ts = rte_ns_to_timespec(ns);
2811 
2812 	return 0;
2813 }
2814 
2815 static int
2816 ngbe_timesync_enable(struct rte_eth_dev *dev)
2817 {
2818 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2819 	uint32_t tsync_ctl;
2820 
2821 	/* Stop the timesync system time. */
2822 	wr32(hw, NGBE_TSTIMEINC, 0x0);
2823 	/* Reset the timesync system time value. */
2824 	wr32(hw, NGBE_TSTIMEL, 0x0);
2825 	wr32(hw, NGBE_TSTIMEH, 0x0);
2826 
2827 	ngbe_start_timecounters(dev);
2828 
2829 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2830 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588),
2831 		RTE_ETHER_TYPE_1588 | NGBE_ETFLT_ENA | NGBE_ETFLT_1588);
2832 
2833 	/* Enable timestamping of received PTP packets. */
2834 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2835 	tsync_ctl |= NGBE_TSRXCTL_ENA;
2836 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2837 
2838 	/* Enable timestamping of transmitted PTP packets. */
2839 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2840 	tsync_ctl |= NGBE_TSTXCTL_ENA;
2841 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2842 
2843 	ngbe_flush(hw);
2844 
2845 	return 0;
2846 }
2847 
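/*
 * Illustrative usage sketch (not part of the driver): a minimal IEEE 1588
 * sequence over the generic API, assuming PTP frames are flowing on
 * "port_id" (hypothetical).
 *
 *   #include <stdio.h>
 *   #include <rte_ethdev.h>
 *
 *   struct timespec ts;
 *
 *   rte_eth_timesync_enable(port_id);
 *   // after a packet flagged with an IEEE 1588 Rx timestamp arrives:
 *   if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *           printf("rx stamp %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
 *   rte_eth_timesync_adjust_time(port_id, 1000);	// slew +1 us
 *   rte_eth_timesync_disable(port_id);
 */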
2848 static int
2849 ngbe_timesync_disable(struct rte_eth_dev *dev)
2850 {
2851 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2852 	uint32_t tsync_ctl;
2853 
2854 	/* Disable timestamping of transmitted PTP packets. */
2855 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2856 	tsync_ctl &= ~NGBE_TSTXCTL_ENA;
2857 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2858 
2859 	/* Disable timestamping of received PTP packets. */
2860 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2861 	tsync_ctl &= ~NGBE_TSRXCTL_ENA;
2862 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2863 
2864 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2865 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);
2866 
2867 	/* Stop incrementing the System Time registers. */
2868 	wr32(hw, NGBE_TSTIMEINC, 0);
2869 
2870 	return 0;
2871 }
2872 
2873 static int
2874 ngbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
2875 				 struct timespec *timestamp,
2876 				 uint32_t flags __rte_unused)
2877 {
2878 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2879 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2880 	uint32_t tsync_rxctl;
2881 	uint64_t rx_tstamp_cycles;
2882 	uint64_t ns;
2883 
2884 	tsync_rxctl = rd32(hw, NGBE_TSRXCTL);
2885 	if ((tsync_rxctl & NGBE_TSRXCTL_VLD) == 0)
2886 		return -EINVAL;
2887 
2888 	rx_tstamp_cycles = ngbe_read_rx_tstamp_cyclecounter(dev);
2889 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
2890 	*timestamp = rte_ns_to_timespec(ns);
2891 
2892 	return  0;
2893 }
2894 
2895 static int
2896 ngbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
2897 				 struct timespec *timestamp)
2898 {
2899 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2900 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2901 	uint32_t tsync_txctl;
2902 	uint64_t tx_tstamp_cycles;
2903 	uint64_t ns;
2904 
2905 	tsync_txctl = rd32(hw, NGBE_TSTXCTL);
2906 	if ((tsync_txctl & NGBE_TSTXCTL_VLD) == 0)
2907 		return -EINVAL;
2908 
2909 	tx_tstamp_cycles = ngbe_read_tx_tstamp_cyclecounter(dev);
2910 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
2911 	*timestamp = rte_ns_to_timespec(ns);
2912 
2913 	return 0;
2914 }
2915 
2916 static int
2917 ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
2918 {
2919 	int count = 0;
2920 	int g_ind = 0;
2921 	const struct reg_info *reg_group;
2922 	const struct reg_info **reg_set = ngbe_regs_others;
2923 
2924 	while ((reg_group = reg_set[g_ind++]))
2925 		count += ngbe_regs_group_count(reg_group);
2926 
2927 	return count;
2928 }
2929 
2930 static int
2931 ngbe_get_regs(struct rte_eth_dev *dev,
2932 	      struct rte_dev_reg_info *regs)
2933 {
2934 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2935 	uint32_t *data = regs->data;
2936 	int g_ind = 0;
2937 	int count = 0;
2938 	const struct reg_info *reg_group;
2939 	const struct reg_info **reg_set = ngbe_regs_others;
2940 
2941 	if (data == NULL) {
2942 		regs->length = ngbe_get_reg_length(dev);
2943 		regs->width = sizeof(uint32_t);
2944 		return 0;
2945 	}
2946 
2947 	/* Support only full register dump */
2948 	if (regs->length == 0 ||
2949 	    regs->length == (uint32_t)ngbe_get_reg_length(dev)) {
2950 		regs->version = hw->mac.type << 24 |
2951 				hw->revision_id << 16 |
2952 				hw->device_id;
2953 		while ((reg_group = reg_set[g_ind++]))
2954 			count += ngbe_read_regs_group(dev, &data[count],
2955 						      reg_group);
2956 		return 0;
2957 	}
2958 
2959 	return -ENOTSUP;
2960 }
2961 
2962 static int
2963 ngbe_get_eeprom_length(struct rte_eth_dev *dev)
2964 {
2965 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2966 
2967 	/* Return unit is byte count */
2968 	return hw->rom.word_size * 2;
2969 }
2970 
2971 static int
2972 ngbe_get_eeprom(struct rte_eth_dev *dev,
2973 		struct rte_dev_eeprom_info *in_eeprom)
2974 {
2975 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2976 	struct ngbe_rom_info *eeprom = &hw->rom;
2977 	uint16_t *data = in_eeprom->data;
2978 	int first, length;
2979 
2980 	first = in_eeprom->offset >> 1;
2981 	length = in_eeprom->length >> 1;
2982 	if (first > hw->rom.word_size ||
2983 	    ((first + length) > hw->rom.word_size))
2984 		return -EINVAL;
2985 
2986 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
2987 
2988 	return eeprom->readw_buffer(hw, first, length, data);
2989 }
2990 
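/*
 * Illustrative usage sketch (not part of the driver): dumping the first
 * 32 bytes of the EEPROM through the generic API. "port_id" and "buf"
 * are placeholders; offset/length must stay within the EEPROM size.
 *
 *   #include <rte_ethdev.h>
 *
 *   uint16_t buf[16];
 *   struct rte_dev_eeprom_info info = {
 *           .data = buf,
 *           .offset = 0,
 *           .length = sizeof(buf),
 *   };
 *   if (rte_eth_dev_get_eeprom_length(port_id) >= (int)sizeof(buf))
 *           rte_eth_dev_get_eeprom(port_id, &info);
 */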
2991 static int
2992 ngbe_set_eeprom(struct rte_eth_dev *dev,
2993 		struct rte_dev_eeprom_info *in_eeprom)
2994 {
2995 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2996 	struct ngbe_rom_info *eeprom = &hw->rom;
2997 	uint16_t *data = in_eeprom->data;
2998 	int first, length;
2999 
3000 	first = in_eeprom->offset >> 1;
3001 	length = in_eeprom->length >> 1;
3002 	if (first > hw->rom.word_size ||
3003 	    ((first + length) > hw->rom.word_size))
3004 		return -EINVAL;
3005 
3006 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3007 
3008 	return eeprom->writew_buffer(hw, first, length, data);
3009 }
3010 
3011 static const struct eth_dev_ops ngbe_eth_dev_ops = {
3012 	.dev_configure              = ngbe_dev_configure,
3013 	.dev_infos_get              = ngbe_dev_info_get,
3014 	.dev_start                  = ngbe_dev_start,
3015 	.dev_stop                   = ngbe_dev_stop,
3016 	.dev_close                  = ngbe_dev_close,
3017 	.dev_reset                  = ngbe_dev_reset,
3018 	.promiscuous_enable         = ngbe_dev_promiscuous_enable,
3019 	.promiscuous_disable        = ngbe_dev_promiscuous_disable,
3020 	.allmulticast_enable        = ngbe_dev_allmulticast_enable,
3021 	.allmulticast_disable       = ngbe_dev_allmulticast_disable,
3022 	.link_update                = ngbe_dev_link_update,
3023 	.stats_get                  = ngbe_dev_stats_get,
3024 	.xstats_get                 = ngbe_dev_xstats_get,
3025 	.xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
3026 	.stats_reset                = ngbe_dev_stats_reset,
3027 	.xstats_reset               = ngbe_dev_xstats_reset,
3028 	.xstats_get_names           = ngbe_dev_xstats_get_names,
3029 	.xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
3030 	.fw_version_get             = ngbe_fw_version_get,
3031 	.dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
3032 	.mtu_set                    = ngbe_dev_mtu_set,
3033 	.vlan_filter_set            = ngbe_vlan_filter_set,
3034 	.vlan_tpid_set              = ngbe_vlan_tpid_set,
3035 	.vlan_offload_set           = ngbe_vlan_offload_set,
3036 	.vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
3037 	.rx_queue_start	            = ngbe_dev_rx_queue_start,
3038 	.rx_queue_stop              = ngbe_dev_rx_queue_stop,
3039 	.tx_queue_start	            = ngbe_dev_tx_queue_start,
3040 	.tx_queue_stop              = ngbe_dev_tx_queue_stop,
3041 	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
3042 	.rx_queue_release           = ngbe_dev_rx_queue_release,
3043 	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
3044 	.tx_queue_release           = ngbe_dev_tx_queue_release,
3045 	.dev_led_on                 = ngbe_dev_led_on,
3046 	.dev_led_off                = ngbe_dev_led_off,
3047 	.flow_ctrl_get              = ngbe_flow_ctrl_get,
3048 	.flow_ctrl_set              = ngbe_flow_ctrl_set,
3049 	.mac_addr_add               = ngbe_add_rar,
3050 	.mac_addr_remove            = ngbe_remove_rar,
3051 	.mac_addr_set               = ngbe_set_default_mac_addr,
3052 	.uc_hash_table_set          = ngbe_uc_hash_table_set,
3053 	.uc_all_hash_table_set      = ngbe_uc_all_hash_table_set,
3054 	.reta_update                = ngbe_dev_rss_reta_update,
3055 	.reta_query                 = ngbe_dev_rss_reta_query,
3056 	.rss_hash_update            = ngbe_dev_rss_hash_update,
3057 	.rss_hash_conf_get          = ngbe_dev_rss_hash_conf_get,
3058 	.set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
3059 	.rxq_info_get               = ngbe_rxq_info_get,
3060 	.txq_info_get               = ngbe_txq_info_get,
3061 	.rx_burst_mode_get          = ngbe_rx_burst_mode_get,
3062 	.tx_burst_mode_get          = ngbe_tx_burst_mode_get,
3063 	.timesync_enable            = ngbe_timesync_enable,
3064 	.timesync_disable           = ngbe_timesync_disable,
3065 	.timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp,
3066 	.timesync_read_tx_timestamp = ngbe_timesync_read_tx_timestamp,
3067 	.get_reg                    = ngbe_get_regs,
3068 	.get_eeprom_length          = ngbe_get_eeprom_length,
3069 	.get_eeprom                 = ngbe_get_eeprom,
3070 	.set_eeprom                 = ngbe_set_eeprom,
3071 	.timesync_adjust_time       = ngbe_timesync_adjust_time,
3072 	.timesync_read_time         = ngbe_timesync_read_time,
3073 	.timesync_write_time        = ngbe_timesync_write_time,
3074 	.tx_done_cleanup            = ngbe_dev_tx_done_cleanup,
3075 };
3076 
3077 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
3078 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
3079 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
3080 
3081 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
3082 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
3083 
3084 #ifdef RTE_ETHDEV_DEBUG_RX
3085 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
3086 #endif
3087 #ifdef RTE_ETHDEV_DEBUG_TX
3088 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
3089 #endif
3090