xref: /dpdk/drivers/net/ngbe/ngbe_ethdev.c (revision 6903de616a1296e368002687cb43ef3b9fc4699f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3  * Copyright(c) 2010-2017 Intel Corporation
4  */
5 
6 #include <errno.h>
7 #include <rte_common.h>
8 #include <ethdev_pci.h>
9 
10 #include <rte_alarm.h>
11 
12 #include "ngbe_logs.h"
13 #include "ngbe.h"
14 #include "ngbe_ethdev.h"
15 #include "ngbe_rxtx.h"
16 #include "ngbe_regs_group.h"
17 
18 static const struct reg_info ngbe_regs_general[] = {
19 	{NGBE_RST, 1, 1, "NGBE_RST"},
20 	{NGBE_STAT, 1, 1, "NGBE_STAT"},
21 	{NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
22 	{NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
23 	{NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
24 	{NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
25 	{0, 0, 0, ""}
26 };
27 
28 static const struct reg_info ngbe_regs_nvm[] = {
29 	{0, 0, 0, ""}
30 };
31 
32 static const struct reg_info ngbe_regs_interrupt[] = {
33 	{0, 0, 0, ""}
34 };
35 
36 static const struct reg_info ngbe_regs_fctl_others[] = {
37 	{0, 0, 0, ""}
38 };
39 
40 static const struct reg_info ngbe_regs_rxdma[] = {
41 	{0, 0, 0, ""}
42 };
43 
44 static const struct reg_info ngbe_regs_rx[] = {
45 	{0, 0, 0, ""}
46 };
47 
48 static const struct reg_info ngbe_regs_tx[] = {
49 	{0, 0, 0, ""}
50 };
51 
52 static const struct reg_info ngbe_regs_wakeup[] = {
53 	{0, 0, 0, ""}
54 };
55 
56 static const struct reg_info ngbe_regs_mac[] = {
57 	{0, 0, 0, ""}
58 };
59 
60 static const struct reg_info ngbe_regs_diagnostic[] = {
61 	{0, 0, 0, ""}
62 };
63 
64 /* PF registers */
65 static const struct reg_info *ngbe_regs_others[] = {
66 				ngbe_regs_general,
67 				ngbe_regs_nvm,
68 				ngbe_regs_interrupt,
69 				ngbe_regs_fctl_others,
70 				ngbe_regs_rxdma,
71 				ngbe_regs_rx,
72 				ngbe_regs_tx,
73 				ngbe_regs_wakeup,
74 				ngbe_regs_mac,
75 				ngbe_regs_diagnostic,
76 				NULL};
77 
78 static int ngbe_dev_close(struct rte_eth_dev *dev);
79 static int ngbe_dev_link_update(struct rte_eth_dev *dev,
80 				int wait_to_complete);
81 static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
82 static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
83 static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
84 					uint16_t queue);
85 
86 static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
87 static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
88 static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
89 static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
90 static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
91 static void ngbe_dev_interrupt_handler(void *param);
92 static void ngbe_configure_msix(struct rte_eth_dev *dev);
93 
94 #define NGBE_SET_HWSTRIP(h, q) do {\
95 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
96 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
97 		(h)->bitmap[idx] |= 1 << bit;\
98 	} while (0)
99 
100 #define NGBE_CLEAR_HWSTRIP(h, q) do {\
101 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
102 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
103 		(h)->bitmap[idx] &= ~(1 << bit);\
104 	} while (0)
105 
106 #define NGBE_GET_HWSTRIP(h, q, r) do {\
107 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
108 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
109 		(r) = (h)->bitmap[idx] >> bit & 1;\
110 	} while (0)
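
/*
 * Worked example (editor's note, illustrative only): with 32-bit bitmap
 * words (sizeof((h)->bitmap[0]) * NBBY == 32), queue 37 gives
 * idx = 37 / 32 = 1 and bit = 37 % 32 = 5, so NGBE_SET_HWSTRIP(h, 37)
 * sets bit 5 of h->bitmap[1] and NGBE_GET_HWSTRIP(h, 37, r) reads it back
 * into r.
 */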
111 
112 /*
113  * The set of PCI devices this driver supports
114  */
115 static const struct rte_pci_id pci_id_ngbe_map[] = {
116 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
117 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
118 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
119 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
120 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
121 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
122 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
123 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
124 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
125 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
126 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
127 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
128 	{ .vendor_id = 0, /* sentinel */ },
129 };
130 
131 static const struct rte_eth_desc_lim rx_desc_lim = {
132 	.nb_max = NGBE_RING_DESC_MAX,
133 	.nb_min = NGBE_RING_DESC_MIN,
134 	.nb_align = NGBE_RXD_ALIGN,
135 };
136 
137 static const struct rte_eth_desc_lim tx_desc_lim = {
138 	.nb_max = NGBE_RING_DESC_MAX,
139 	.nb_min = NGBE_RING_DESC_MIN,
140 	.nb_align = NGBE_TXD_ALIGN,
141 	.nb_seg_max = NGBE_TX_MAX_SEG,
142 	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
143 };
144 
145 static const struct eth_dev_ops ngbe_eth_dev_ops;
146 
147 #define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
148 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
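/*
 * Editor's note (illustrative): given the {name, offset} layout of
 * struct rte_ngbe_xstats_name_off, HW_XSTAT(rx_packets) expands to
 * {"rx_packets", offsetof(struct ngbe_hw_stats, rx_packets)}.
 */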
149 static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
150 	/* MNG RxTx */
151 	HW_XSTAT(mng_bmc2host_packets),
152 	HW_XSTAT(mng_host2bmc_packets),
153 	/* Basic RxTx */
154 	HW_XSTAT(rx_packets),
155 	HW_XSTAT(tx_packets),
156 	HW_XSTAT(rx_bytes),
157 	HW_XSTAT(tx_bytes),
158 	HW_XSTAT(rx_total_bytes),
159 	HW_XSTAT(rx_total_packets),
160 	HW_XSTAT(tx_total_packets),
161 	HW_XSTAT(rx_total_missed_packets),
162 	HW_XSTAT(rx_broadcast_packets),
163 	HW_XSTAT(tx_broadcast_packets),
164 	HW_XSTAT(rx_multicast_packets),
165 	HW_XSTAT(tx_multicast_packets),
166 	HW_XSTAT(rx_management_packets),
167 	HW_XSTAT(tx_management_packets),
168 	HW_XSTAT(rx_management_dropped),
169 	HW_XSTAT(rx_dma_drop),
170 	HW_XSTAT(tx_dma_drop),
171 	HW_XSTAT(tx_secdrp_packets),
172 
173 	/* Basic Error */
174 	HW_XSTAT(rx_crc_errors),
175 	HW_XSTAT(rx_illegal_byte_errors),
176 	HW_XSTAT(rx_error_bytes),
177 	HW_XSTAT(rx_mac_short_packet_dropped),
178 	HW_XSTAT(rx_length_errors),
179 	HW_XSTAT(rx_undersize_errors),
180 	HW_XSTAT(rx_fragment_errors),
181 	HW_XSTAT(rx_oversize_cnt),
182 	HW_XSTAT(rx_jabber_errors),
183 	HW_XSTAT(rx_l3_l4_xsum_error),
184 	HW_XSTAT(mac_local_errors),
185 	HW_XSTAT(mac_remote_errors),
186 
187 	/* PB Stats */
188 	HW_XSTAT(rx_up_dropped),
189 	HW_XSTAT(rdb_pkt_cnt),
190 	HW_XSTAT(rdb_repli_cnt),
191 	HW_XSTAT(rdb_drp_cnt),
192 
193 	/* MACSEC */
194 	HW_XSTAT(tx_macsec_pkts_untagged),
195 	HW_XSTAT(tx_macsec_pkts_encrypted),
196 	HW_XSTAT(tx_macsec_pkts_protected),
197 	HW_XSTAT(tx_macsec_octets_encrypted),
198 	HW_XSTAT(tx_macsec_octets_protected),
199 	HW_XSTAT(rx_macsec_pkts_untagged),
200 	HW_XSTAT(rx_macsec_pkts_badtag),
201 	HW_XSTAT(rx_macsec_pkts_nosci),
202 	HW_XSTAT(rx_macsec_pkts_unknownsci),
203 	HW_XSTAT(rx_macsec_octets_decrypted),
204 	HW_XSTAT(rx_macsec_octets_validated),
205 	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
206 	HW_XSTAT(rx_macsec_sc_pkts_delayed),
207 	HW_XSTAT(rx_macsec_sc_pkts_late),
208 	HW_XSTAT(rx_macsec_sa_pkts_ok),
209 	HW_XSTAT(rx_macsec_sa_pkts_invalid),
210 	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
211 	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
212 	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
213 
214 	/* MAC RxTx */
215 	HW_XSTAT(rx_size_64_packets),
216 	HW_XSTAT(rx_size_65_to_127_packets),
217 	HW_XSTAT(rx_size_128_to_255_packets),
218 	HW_XSTAT(rx_size_256_to_511_packets),
219 	HW_XSTAT(rx_size_512_to_1023_packets),
220 	HW_XSTAT(rx_size_1024_to_max_packets),
221 	HW_XSTAT(tx_size_64_packets),
222 	HW_XSTAT(tx_size_65_to_127_packets),
223 	HW_XSTAT(tx_size_128_to_255_packets),
224 	HW_XSTAT(tx_size_256_to_511_packets),
225 	HW_XSTAT(tx_size_512_to_1023_packets),
226 	HW_XSTAT(tx_size_1024_to_max_packets),
227 
228 	/* Flow Control */
229 	HW_XSTAT(tx_xon_packets),
230 	HW_XSTAT(rx_xon_packets),
231 	HW_XSTAT(tx_xoff_packets),
232 	HW_XSTAT(rx_xoff_packets),
233 
234 	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
235 	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
236 	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
237 	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
238 };
239 
240 #define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
241 			   sizeof(rte_ngbe_stats_strings[0]))
242 
243 /* Per-queue statistics */
244 #define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
245 static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
246 	QP_XSTAT(rx_qp_packets),
247 	QP_XSTAT(tx_qp_packets),
248 	QP_XSTAT(rx_qp_bytes),
249 	QP_XSTAT(tx_qp_bytes),
250 	QP_XSTAT(rx_qp_mc_packets),
251 };
252 
253 #define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
254 			   sizeof(rte_ngbe_qp_strings[0]))
255 
256 static inline int32_t
257 ngbe_pf_reset_hw(struct ngbe_hw *hw)
258 {
259 	uint32_t ctrl_ext;
260 	int32_t status;
261 
262 	status = hw->mac.reset_hw(hw);
263 
264 	ctrl_ext = rd32(hw, NGBE_PORTCTL);
265 	/* Set PF Reset Done bit so PF/VF mailbox ops can work */
266 	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
267 	wr32(hw, NGBE_PORTCTL, ctrl_ext);
268 	ngbe_flush(hw);
269 
270 	if (status == NGBE_ERR_SFP_NOT_PRESENT)
271 		status = 0;
272 	return status;
273 }
274 
275 static inline void
276 ngbe_enable_intr(struct rte_eth_dev *dev)
277 {
278 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
279 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
280 
281 	wr32(hw, NGBE_IENMISC, intr->mask_misc);
282 	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
283 	ngbe_flush(hw);
284 }
285 
286 static void
287 ngbe_disable_intr(struct ngbe_hw *hw)
288 {
289 	PMD_INIT_FUNC_TRACE();
290 
291 	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
292 	ngbe_flush(hw);
293 }
294 
295 /*
296  * Ensure that all locks are released before first NVM or PHY access
297  */
298 static void
299 ngbe_swfw_lock_reset(struct ngbe_hw *hw)
300 {
301 	uint16_t mask;
302 
303 	/*
304 	 * These locks are trickier since they are shared by all ports, but
305 	 * swfw_sync retries for long enough (1s) to be almost sure that if
306 	 * the lock cannot be taken, the cause is an improperly held
307 	 * semaphore.
308 	 */
309 	mask = NGBE_MNGSEM_SWPHY |
310 	       NGBE_MNGSEM_SWMBX |
311 	       NGBE_MNGSEM_SWFLASH;
312 	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
313 		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
314 
315 	hw->mac.release_swfw_sync(hw, mask);
316 }
317 
318 static int
319 eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
320 {
321 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
322 	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
323 	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
324 	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
325 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
326 	const struct rte_memzone *mz;
327 	uint32_t ctrl_ext;
328 	u32 led_conf = 0;
329 	int err, ret;
330 
331 	PMD_INIT_FUNC_TRACE();
332 
333 	eth_dev->dev_ops = &ngbe_eth_dev_ops;
334 	eth_dev->rx_queue_count       = ngbe_dev_rx_queue_count;
335 	eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
336 	eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
337 	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
338 	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
339 	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;
340 
341 	/*
342 	 * For secondary processes, we don't initialise any further as primary
343 	 * has already done this work. Only check we don't need a different
344 	 * Rx and Tx function.
345 	 */
346 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
347 		struct ngbe_tx_queue *txq;
348 		/* The Tx queue function in the primary process is set by the
349 		 * last initialized queue; it may not have been initialized yet.
350 		 */
351 		if (eth_dev->data->tx_queues) {
352 			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
353 			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
354 			ngbe_set_tx_function(eth_dev, txq);
355 		} else {
356 			/* Use default Tx function if we get here */
357 			PMD_INIT_LOG(NOTICE,
358 				"No Tx queues configured yet. Using default Tx function.");
359 		}
360 
361 		ngbe_set_rx_function(eth_dev);
362 
363 		return 0;
364 	}
365 
366 	rte_eth_copy_pci_info(eth_dev, pci_dev);
367 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
368 
369 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
370 
371 	/* Vendor and Device ID need to be set before init of shared code */
372 	hw->back = pci_dev;
373 	hw->device_id = pci_dev->id.device_id;
374 	hw->vendor_id = pci_dev->id.vendor_id;
375 	if (pci_dev->id.subsystem_vendor_id == PCI_VENDOR_ID_WANGXUN) {
376 		hw->sub_system_id = pci_dev->id.subsystem_device_id;
377 	} else {
378 		u32 ssid;
379 
380 		ssid = ngbe_flash_read_dword(hw, 0xFFFDC);
381 		if (ssid == 0x1) {
382 			PMD_INIT_LOG(ERR,
383 				"Read of internal subsystem device id failed\n");
384 			return -ENODEV;
385 		}
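		/*
		 * Editor's note: the expression below byte-swaps the low 16
		 * bits read from flash (e.g. 0x0186 becomes 0x8601), as the
		 * subsystem id is stored with its bytes in opposite order.
		 */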
386 		hw->sub_system_id = (u16)ssid >> 8 | (u16)ssid << 8;
387 	}
388 	ngbe_map_device_id(hw);
389 
390 	/* Reserve memory for interrupt status block */
391 	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
392 		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
393 	if (mz == NULL)
394 		return -ENOMEM;
395 
396 	hw->isb_dma = TMZ_PADDR(mz);
397 	hw->isb_mem = TMZ_VADDR(mz);
398 
399 	/* Initialize the shared code (base driver) */
400 	err = ngbe_init_shared_code(hw);
401 	if (err != 0) {
402 		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
403 		return -EIO;
404 	}
405 
406 	/* Unlock any pending hardware semaphore */
407 	ngbe_swfw_lock_reset(hw);
408 
409 	/* Set default Hardware Flow Control settings */
410 	hw->fc.requested_mode = ngbe_fc_full;
411 	hw->fc.current_mode = ngbe_fc_full;
412 	hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
413 	hw->fc.low_water = NGBE_FC_XON_LOTH;
414 	hw->fc.high_water = NGBE_FC_XOFF_HITH;
415 	hw->fc.send_xon = 1;
416 
417 	err = hw->rom.init_params(hw);
418 	if (err != 0) {
419 		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
420 		return -EIO;
421 	}
422 
423 	/* Make sure we have a good EEPROM before we read from it */
424 	err = hw->rom.validate_checksum(hw, NULL);
425 	if (err != 0) {
426 		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
427 		return -EIO;
428 	}
429 
430 	err = hw->phy.led_oem_chk(hw, &led_conf);
431 	if (err == 0)
432 		hw->led_conf = led_conf;
433 	else
434 		hw->led_conf = 0xFFFF;
435 
436 	err = hw->mac.init_hw(hw);
437 	if (err != 0) {
438 		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
439 		return -EIO;
440 	}
441 
442 	/* Reset the hw statistics */
443 	ngbe_dev_stats_reset(eth_dev);
444 
445 	/* disable interrupt */
446 	ngbe_disable_intr(hw);
447 
448 	/* Allocate memory for storing MAC addresses */
449 	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
450 					       hw->mac.num_rar_entries, 0);
451 	if (eth_dev->data->mac_addrs == NULL) {
452 		PMD_INIT_LOG(ERR,
453 			     "Failed to allocate %u bytes needed to store MAC addresses",
454 			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
455 		return -ENOMEM;
456 	}
457 
458 	/* Copy the permanent MAC address */
459 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
460 			&eth_dev->data->mac_addrs[0]);
461 
462 	/* Allocate memory for storing hash filter MAC addresses */
463 	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
464 			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
465 	if (eth_dev->data->hash_mac_addrs == NULL) {
466 		PMD_INIT_LOG(ERR,
467 			     "Failed to allocate %d bytes needed to store MAC addresses",
468 			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
469 		rte_free(eth_dev->data->mac_addrs);
470 		eth_dev->data->mac_addrs = NULL;
471 		return -ENOMEM;
472 	}
473 
474 	/* initialize the vfta */
475 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
476 
477 	/* initialize the hw strip bitmap */
478 	memset(hwstrip, 0, sizeof(*hwstrip));
479 
480 	/* initialize PF if max_vfs not zero */
481 	ret = ngbe_pf_host_init(eth_dev);
482 	if (ret) {
483 		rte_free(eth_dev->data->mac_addrs);
484 		eth_dev->data->mac_addrs = NULL;
485 		rte_free(eth_dev->data->hash_mac_addrs);
486 		eth_dev->data->hash_mac_addrs = NULL;
487 		return ret;
488 	}
489 
490 	ctrl_ext = rd32(hw, NGBE_PORTCTL);
491 	/* let hardware know driver is loaded */
492 	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
493 	/* Set PF Reset Done bit so PF/VF mailbox ops can work */
494 	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
495 	wr32(hw, NGBE_PORTCTL, ctrl_ext);
496 	ngbe_flush(hw);
497 
498 	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
499 			(int)hw->mac.type, (int)hw->phy.type);
500 
501 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
502 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
503 		     pci_dev->id.device_id);
504 
505 	rte_intr_callback_register(intr_handle,
506 				   ngbe_dev_interrupt_handler, eth_dev);
507 
508 	/* enable uio/vfio intr/eventfd mapping */
509 	rte_intr_enable(intr_handle);
510 
511 	/* enable support intr */
512 	ngbe_enable_intr(eth_dev);
513 
514 	return 0;
515 }
516 
517 static int
518 eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
519 {
520 	PMD_INIT_FUNC_TRACE();
521 
522 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
523 		return 0;
524 
525 	ngbe_dev_close(eth_dev);
526 
527 	return 0;
528 }
529 
530 static int
531 eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
532 		struct rte_pci_device *pci_dev)
533 {
534 	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
535 			sizeof(struct ngbe_adapter),
536 			eth_dev_pci_specific_init, pci_dev,
537 			eth_ngbe_dev_init, NULL);
538 }
539 
540 static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
541 {
542 	struct rte_eth_dev *ethdev;
543 
544 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
545 	if (ethdev == NULL)
546 		return 0;
547 
548 	return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
549 }
550 
551 static struct rte_pci_driver rte_ngbe_pmd = {
552 	.id_table = pci_id_ngbe_map,
553 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
554 		     RTE_PCI_DRV_INTR_LSC,
555 	.probe = eth_ngbe_pci_probe,
556 	.remove = eth_ngbe_pci_remove,
557 };
558 
559 static int
560 ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
561 {
562 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
563 	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
564 	uint32_t vfta;
565 	uint32_t vid_idx;
566 	uint32_t vid_bit;
567 
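	/*
	 * The 4096-entry VLAN filter table lives in 128 32-bit registers:
	 * bits 11:5 of the VLAN id select the register (vid_idx) and bits
	 * 4:0 select the bit within it (vid_bit), e.g. VLAN 100 maps to
	 * register 3, bit 4.
	 */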
568 	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
569 	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
570 	vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
571 	if (on)
572 		vfta |= vid_bit;
573 	else
574 		vfta &= ~vid_bit;
575 	wr32(hw, NGBE_VLANTBL(vid_idx), vfta);
576 
577 	/* update local VFTA copy */
578 	shadow_vfta->vfta[vid_idx] = vfta;
579 
580 	return 0;
581 }
582 
583 static void
584 ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
585 {
586 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
587 	struct ngbe_rx_queue *rxq;
588 	bool restart;
589 	uint32_t rxcfg, rxbal, rxbah;
590 
591 	if (on)
592 		ngbe_vlan_hw_strip_enable(dev, queue);
593 	else
594 		ngbe_vlan_hw_strip_disable(dev, queue);
595 
596 	rxq = dev->data->rx_queues[queue];
597 	rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx));
598 	rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx));
599 	rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
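	/*
	 * The strip bit can only be changed while the ring is disabled, so
	 * a restart is needed only if the queue is running and the bit
	 * actually changes.
	 */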
600 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
601 		restart = (rxcfg & NGBE_RXCFG_ENA) &&
602 			!(rxcfg & NGBE_RXCFG_VLAN);
603 		rxcfg |= NGBE_RXCFG_VLAN;
604 	} else {
605 		restart = (rxcfg & NGBE_RXCFG_ENA) &&
606 			(rxcfg & NGBE_RXCFG_VLAN);
607 		rxcfg &= ~NGBE_RXCFG_VLAN;
608 	}
609 	rxcfg &= ~NGBE_RXCFG_ENA;
610 
611 	if (restart) {
612 		/* set vlan strip for ring */
613 		ngbe_dev_rx_queue_stop(dev, queue);
614 		wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal);
615 		wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah);
616 		wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg);
617 		ngbe_dev_rx_queue_start(dev, queue);
618 	}
619 }
620 
621 static int
622 ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
623 		    enum rte_vlan_type vlan_type,
624 		    uint16_t tpid)
625 {
626 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
627 	int ret = 0;
628 	uint32_t portctrl, vlan_ext, qinq;
629 
630 	portctrl = rd32(hw, NGBE_PORTCTL);
631 
632 	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
633 	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
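	/*
	 * Editor's note: in QinQ mode, TAGTPID(0) carries both TPIDs, the
	 * inner one in its low 16 bits and the outer one in its high 16
	 * bits, hence the LSB/MSB masks used below.
	 */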
634 	switch (vlan_type) {
635 	case RTE_ETH_VLAN_TYPE_INNER:
636 		if (vlan_ext) {
637 			wr32m(hw, NGBE_VLANCTL,
638 				NGBE_VLANCTL_TPID_MASK,
639 				NGBE_VLANCTL_TPID(tpid));
640 			wr32m(hw, NGBE_DMATXCTRL,
641 				NGBE_DMATXCTRL_TPID_MASK,
642 				NGBE_DMATXCTRL_TPID(tpid));
643 		} else {
644 			ret = -ENOTSUP;
645 			PMD_DRV_LOG(ERR,
646 				"Inner type is not supported by single VLAN");
647 		}
648 
649 		if (qinq) {
650 			wr32m(hw, NGBE_TAGTPID(0),
651 				NGBE_TAGTPID_LSB_MASK,
652 				NGBE_TAGTPID_LSB(tpid));
653 		}
654 		break;
655 	case RTE_ETH_VLAN_TYPE_OUTER:
656 		if (vlan_ext) {
657 			/* Only the high 16 bits are valid */
658 			wr32m(hw, NGBE_EXTAG,
659 				NGBE_EXTAG_VLAN_MASK,
660 				NGBE_EXTAG_VLAN(tpid));
661 		} else {
662 			wr32m(hw, NGBE_VLANCTL,
663 				NGBE_VLANCTL_TPID_MASK,
664 				NGBE_VLANCTL_TPID(tpid));
665 			wr32m(hw, NGBE_DMATXCTRL,
666 				NGBE_DMATXCTRL_TPID_MASK,
667 				NGBE_DMATXCTRL_TPID(tpid));
668 		}
669 
670 		if (qinq) {
671 			wr32m(hw, NGBE_TAGTPID(0),
672 				NGBE_TAGTPID_MSB_MASK,
673 				NGBE_TAGTPID_MSB(tpid));
674 		}
675 		break;
676 	default:
677 		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
678 		return -EINVAL;
679 	}
680 
681 	return ret;
682 }
683 
684 void
685 ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
686 {
687 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
688 	uint32_t vlnctrl;
689 
690 	PMD_INIT_FUNC_TRACE();
691 
692 	/* Filter Table Disable */
693 	vlnctrl = rd32(hw, NGBE_VLANCTL);
694 	vlnctrl &= ~NGBE_VLANCTL_VFE;
695 	wr32(hw, NGBE_VLANCTL, vlnctrl);
696 }
697 
698 void
699 ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
700 {
701 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
702 	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
703 	uint32_t vlnctrl;
704 	uint16_t i;
705 
706 	PMD_INIT_FUNC_TRACE();
707 
708 	/* Filter Table Enable */
709 	vlnctrl = rd32(hw, NGBE_VLANCTL);
710 	vlnctrl &= ~NGBE_VLANCTL_CFIENA;
711 	vlnctrl |= NGBE_VLANCTL_VFE;
712 	wr32(hw, NGBE_VLANCTL, vlnctrl);
713 
714 	/* write whatever is in local vfta copy */
715 	for (i = 0; i < NGBE_VFTA_SIZE; i++)
716 		wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
717 }
718 
719 void
720 ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
721 {
722 	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
723 	struct ngbe_rx_queue *rxq;
724 
725 	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
726 		return;
727 
728 	if (on)
729 		NGBE_SET_HWSTRIP(hwstrip, queue);
730 	else
731 		NGBE_CLEAR_HWSTRIP(hwstrip, queue);
732 
733 	if (queue >= dev->data->nb_rx_queues)
734 		return;
735 
736 	rxq = dev->data->rx_queues[queue];
737 
738 	if (on) {
739 		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
740 		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
741 	} else {
742 		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
743 		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
744 	}
745 }
746 
747 static void
748 ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
749 {
750 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
751 	uint32_t ctrl;
752 
753 	PMD_INIT_FUNC_TRACE();
754 
755 	ctrl = rd32(hw, NGBE_RXCFG(queue));
756 	ctrl &= ~NGBE_RXCFG_VLAN;
757 	wr32(hw, NGBE_RXCFG(queue), ctrl);
758 
759 	/* record the per-queue HW strip setting */
760 	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
761 }
762 
763 static void
764 ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
765 {
766 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
767 	uint32_t ctrl;
768 
769 	PMD_INIT_FUNC_TRACE();
770 
771 	ctrl = rd32(hw, NGBE_RXCFG(queue));
772 	ctrl |= NGBE_RXCFG_VLAN;
773 	wr32(hw, NGBE_RXCFG(queue), ctrl);
774 
775 	/* record the per-queue HW strip setting */
776 	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
777 }
778 
779 static void
780 ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
781 {
782 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
783 	uint32_t ctrl;
784 
785 	PMD_INIT_FUNC_TRACE();
786 
787 	ctrl = rd32(hw, NGBE_PORTCTL);
788 	ctrl &= ~NGBE_PORTCTL_VLANEXT;
789 	ctrl &= ~NGBE_PORTCTL_QINQ;
790 	wr32(hw, NGBE_PORTCTL, ctrl);
791 }
792 
793 static void
794 ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
795 {
796 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
797 	uint32_t ctrl;
798 
799 	PMD_INIT_FUNC_TRACE();
800 
801 	ctrl  = rd32(hw, NGBE_PORTCTL);
802 	ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
803 	wr32(hw, NGBE_PORTCTL, ctrl);
804 }
805 
806 static void
807 ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
808 {
809 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
810 	uint32_t ctrl;
811 
812 	PMD_INIT_FUNC_TRACE();
813 
814 	ctrl = rd32(hw, NGBE_PORTCTL);
815 	ctrl &= ~NGBE_PORTCTL_QINQ;
816 	wr32(hw, NGBE_PORTCTL, ctrl);
817 }
818 
819 static void
820 ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
821 {
822 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
823 	uint32_t ctrl;
824 
825 	PMD_INIT_FUNC_TRACE();
826 
827 	ctrl  = rd32(hw, NGBE_PORTCTL);
828 	ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
829 	wr32(hw, NGBE_PORTCTL, ctrl);
830 }
831 
832 void
833 ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
834 {
835 	struct ngbe_rx_queue *rxq;
836 	uint16_t i;
837 
838 	PMD_INIT_FUNC_TRACE();
839 
840 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
841 		rxq = dev->data->rx_queues[i];
842 
843 		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
844 			ngbe_vlan_hw_strip_enable(dev, i);
845 		else
846 			ngbe_vlan_hw_strip_disable(dev, i);
847 	}
848 }
849 
850 void
851 ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
852 {
853 	uint16_t i;
854 	struct rte_eth_rxmode *rxmode;
855 	struct ngbe_rx_queue *rxq;
856 
857 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
858 		rxmode = &dev->data->dev_conf.rxmode;
859 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
860 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
861 				rxq = dev->data->rx_queues[i];
862 				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
863 			}
864 		else
865 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
866 				rxq = dev->data->rx_queues[i];
867 				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
868 			}
869 	}
870 }
871 
872 static int
873 ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
874 {
875 	struct rte_eth_rxmode *rxmode;
876 	rxmode = &dev->data->dev_conf.rxmode;
877 
878 	if (mask & RTE_ETH_VLAN_STRIP_MASK)
879 		ngbe_vlan_hw_strip_config(dev);
880 
881 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
882 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
883 			ngbe_vlan_hw_filter_enable(dev);
884 		else
885 			ngbe_vlan_hw_filter_disable(dev);
886 	}
887 
888 	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
889 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
890 			ngbe_vlan_hw_extend_enable(dev);
891 		else
892 			ngbe_vlan_hw_extend_disable(dev);
893 	}
894 
895 	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
896 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
897 			ngbe_qinq_hw_strip_enable(dev);
898 		else
899 			ngbe_qinq_hw_strip_disable(dev);
900 	}
901 
902 	return 0;
903 }
904 
905 static int
906 ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
907 {
908 	ngbe_config_vlan_strip_on_all_queues(dev, mask);
909 
910 	ngbe_vlan_offload_config(dev, mask);
911 
912 	return 0;
913 }
914 
915 static int
916 ngbe_dev_configure(struct rte_eth_dev *dev)
917 {
918 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
919 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
920 
921 	PMD_INIT_FUNC_TRACE();
922 
923 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
924 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
925 
926 	/* set flag to update link status after init */
927 	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
928 
929 	/*
930 	 * Initialize to TRUE. If any Rx queue fails to meet the bulk
931 	 * allocation preconditions, it will be reset.
932 	 */
933 	adapter->rx_bulk_alloc_allowed = true;
934 
935 	return 0;
936 }
937 
938 static void
939 ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
940 {
941 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
942 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
943 
944 	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
945 	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
946 	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
947 	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
948 		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
949 	else
950 		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));
951 
952 	intr->mask_misc |= NGBE_ICRMISC_GPIO | NGBE_ICRMISC_HEAT;
953 }
954 
955 /*
956  * Configure device link speed and setup link.
957  * It returns 0 on success.
958  */
959 static int
960 ngbe_dev_start(struct rte_eth_dev *dev)
961 {
962 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
963 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
964 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
965 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
966 	uint32_t intr_vector = 0;
967 	int err;
968 	bool link_up = false, negotiate = false;
969 	uint32_t speed = 0;
970 	uint32_t allowed_speeds = 0;
971 	int mask = 0;
972 	int status;
973 	uint32_t *link_speeds;
974 
975 	PMD_INIT_FUNC_TRACE();
976 
977 	/* disable uio/vfio intr/eventfd mapping */
978 	rte_intr_disable(intr_handle);
979 
980 	/* stop adapter */
981 	hw->adapter_stopped = 0;
982 
983 	/* reinitialize adapter, this calls reset and start */
984 	hw->nb_rx_queues = dev->data->nb_rx_queues;
985 	hw->nb_tx_queues = dev->data->nb_tx_queues;
986 	status = ngbe_pf_reset_hw(hw);
987 	if (status != 0)
988 		return -1;
989 	hw->mac.start_hw(hw);
990 	hw->mac.get_link_status = true;
991 
992 	ngbe_set_pcie_master(hw, true);
993 
994 	/* configure PF module if SRIOV enabled */
995 	ngbe_pf_host_configure(dev);
996 
997 	ngbe_dev_phy_intr_setup(dev);
998 
999 	/* check and configure queue intr-vector mapping */
1000 	if ((rte_intr_cap_multiple(intr_handle) ||
1001 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
1002 	    dev->data->dev_conf.intr_conf.rxq != 0) {
1003 		intr_vector = dev->data->nb_rx_queues;
1004 		if (rte_intr_efd_enable(intr_handle, intr_vector))
1005 			return -1;
1006 	}
1007 
1008 	if (rte_intr_dp_is_en(intr_handle)) {
1009 		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
1010 						   dev->data->nb_rx_queues)) {
1011 			PMD_INIT_LOG(ERR,
1012 				     "Failed to allocate %d rx_queues intr_vec",
1013 				     dev->data->nb_rx_queues);
1014 			return -ENOMEM;
1015 		}
1016 	}
1017 
1018 	/* configure MSI-X for sleep until Rx interrupt */
1019 	ngbe_configure_msix(dev);
1020 
1021 	/* initialize transmission unit */
1022 	ngbe_dev_tx_init(dev);
1023 
1024 	/* This can fail when allocating mbufs for descriptor rings */
1025 	err = ngbe_dev_rx_init(dev);
1026 	if (err != 0) {
1027 		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
1028 		goto error;
1029 	}
1030 
1031 	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
1032 		RTE_ETH_VLAN_EXTEND_MASK;
1033 	err = ngbe_vlan_offload_config(dev, mask);
1034 	if (err != 0) {
1035 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1036 		goto error;
1037 	}
1038 
1039 	hw->mac.setup_pba(hw);
1040 	ngbe_configure_port(dev);
1041 
1042 	err = ngbe_dev_rxtx_start(dev);
1043 	if (err < 0) {
1044 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1045 		goto error;
1046 	}
1047 
1048 	/* Skip link setup if loopback mode is enabled. */
1049 	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
1050 		goto skip_link_setup;
1051 
1052 	hw->lsc = dev->data->dev_conf.intr_conf.lsc;
1053 
1054 	err = hw->mac.check_link(hw, &speed, &link_up, 0);
1055 	if (err != 0)
1056 		goto error;
1057 	dev->data->dev_link.link_status = link_up;
1058 
1059 	link_speeds = &dev->data->dev_conf.link_speeds;
1060 	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
1061 		negotiate = true;
1062 
1063 	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
1064 	if (err != 0)
1065 		goto error;
1066 
1067 	allowed_speeds = 0;
1068 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
1069 		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
1070 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
1071 		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
1072 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
1073 		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;
1074 
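	/*
	 * Bit 0 of link_speeds is RTE_ETH_LINK_SPEED_FIXED, so shift it out
	 * and reject any requested speed bit that is not in allowed_speeds.
	 */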
1075 	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
1076 		PMD_INIT_LOG(ERR, "Invalid link setting");
1077 		goto error;
1078 	}
1079 
1080 	speed = 0x0;
1081 	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
1082 		speed = hw->mac.default_speeds;
1083 	} else {
1084 		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
1085 			speed |= NGBE_LINK_SPEED_1GB_FULL;
1086 		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
1087 			speed |= NGBE_LINK_SPEED_100M_FULL;
1088 		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
1089 			speed |= NGBE_LINK_SPEED_10M_FULL;
1090 	}
1091 
1092 	err = hw->phy.init_hw(hw);
1093 	if (err != 0) {
1094 		PMD_INIT_LOG(ERR, "PHY init failed");
1095 		goto error;
1096 	}
1097 	err = hw->mac.setup_link(hw, speed, link_up);
1098 	if (err != 0)
1099 		goto error;
1100 
1101 skip_link_setup:
1102 
1103 	if (rte_intr_allow_others(intr_handle)) {
1104 		ngbe_dev_misc_interrupt_setup(dev);
1105 		/* check if lsc interrupt is enabled */
1106 		if (dev->data->dev_conf.intr_conf.lsc != 0)
1107 			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
1108 		else
1109 			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
1110 		ngbe_dev_macsec_interrupt_setup(dev);
1111 		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
1112 	} else {
1113 		rte_intr_callback_unregister(intr_handle,
1114 					     ngbe_dev_interrupt_handler, dev);
1115 		if (dev->data->dev_conf.intr_conf.lsc != 0)
1116 			PMD_INIT_LOG(INFO,
1117 				     "LSC won't enable because of no intr multiplex");
1118 	}
1119 
1120 	/* check if rxq interrupt is enabled */
1121 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1122 	    rte_intr_dp_is_en(intr_handle))
1123 		ngbe_dev_rxq_interrupt_setup(dev);
1124 
1125 	/* enable UIO/VFIO intr/eventfd mapping */
1126 	rte_intr_enable(intr_handle);
1127 
1128 	/* re-enable the interrupts that were enabled before the HW reset */
1129 	ngbe_enable_intr(dev);
1130 
1131 	if (hw->gpio_ctl) {
1132 		/* GPIO 0 is used for power on/off control */
1133 		wr32(hw, NGBE_GPIODATA, 0);
1134 	}
1135 
1136 	/*
1137 	 * Update link status right before return, because it may
1138 	 * start link configuration process in a separate thread.
1139 	 */
1140 	ngbe_dev_link_update(dev, 0);
1141 
1142 	ngbe_read_stats_registers(hw, hw_stats);
1143 	hw->offset_loaded = 1;
1144 
1145 	return 0;
1146 
1147 error:
1148 	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1149 	ngbe_dev_clear_queues(dev);
1150 	return -EIO;
1151 }
1152 
1153 /*
1154  * Stop device: disable rx and tx functions to allow for reconfiguring.
1155  */
1156 static int
1157 ngbe_dev_stop(struct rte_eth_dev *dev)
1158 {
1159 	struct rte_eth_link link;
1160 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
1161 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1162 	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
1163 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1164 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1165 	int vf;
1166 
1167 	if (hw->adapter_stopped)
1168 		return 0;
1169 
1170 	PMD_INIT_FUNC_TRACE();
1171 
1172 	if (hw->gpio_ctl) {
1173 		/* GPIO 0 is used for power on/off control */
1174 		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
1175 	}
1176 
1177 	/* disable interrupts */
1178 	ngbe_disable_intr(hw);
1179 
1180 	/* reset the NIC */
1181 	ngbe_pf_reset_hw(hw);
1182 	hw->adapter_stopped = 0;
1183 
1184 	/* stop adapter */
1185 	ngbe_stop_hw(hw);
1186 
1187 	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
1188 		vfinfo[vf].clear_to_send = false;
1189 
1190 	hw->phy.set_phy_power(hw, false);
1191 
1192 	ngbe_dev_clear_queues(dev);
1193 
1194 	/* Clear stored conf */
1195 	dev->data->scattered_rx = 0;
1196 
1197 	/* Clear recorded link status */
1198 	memset(&link, 0, sizeof(link));
1199 	rte_eth_linkstatus_set(dev, &link);
1200 
1201 	if (!rte_intr_allow_others(intr_handle))
1202 		/* restore the default handler */
1203 		rte_intr_callback_register(intr_handle,
1204 					   ngbe_dev_interrupt_handler,
1205 					   (void *)dev);
1206 
1207 	/* Clean datapath event and queue/vec mapping */
1208 	rte_intr_efd_disable(intr_handle);
1209 	rte_intr_vec_list_free(intr_handle);
1210 
1211 	ngbe_set_pcie_master(hw, true);
1212 
1213 	adapter->rss_reta_updated = 0;
1214 
1215 	hw->adapter_stopped = true;
1216 	dev->data->dev_started = 0;
1217 
1218 	return 0;
1219 }
1220 
1221 /*
1222  * Set device link up: power on.
1223  */
1224 static int
1225 ngbe_dev_set_link_up(struct rte_eth_dev *dev)
1226 {
1227 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1228 
1229 	hw->phy.set_phy_power(hw, true);
1230 
1231 	return 0;
1232 }
1233 
1234 /*
1235  * Set device link down: power off.
1236  */
1237 static int
1238 ngbe_dev_set_link_down(struct rte_eth_dev *dev)
1239 {
1240 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1241 
1242 	hw->phy.set_phy_power(hw, false);
1243 
1244 	return 0;
1245 }
1246 
1247 /*
1248  * Reset and stop device.
1249  */
1250 static int
1251 ngbe_dev_close(struct rte_eth_dev *dev)
1252 {
1253 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1254 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1255 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1256 	int retries = 0;
1257 	int ret;
1258 
1259 	PMD_INIT_FUNC_TRACE();
1260 
1261 	ngbe_pf_reset_hw(hw);
1262 
1263 	ngbe_dev_stop(dev);
1264 
1265 	ngbe_dev_free_queues(dev);
1266 
1267 	ngbe_set_pcie_master(hw, false);
1268 
1269 	/* reprogram the RAR[0] in case user changed it. */
1270 	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1271 
1272 	/* Unlock any pending hardware semaphore */
1273 	ngbe_swfw_lock_reset(hw);
1274 
1275 	/* disable uio intr before callback unregister */
1276 	rte_intr_disable(intr_handle);
1277 
1278 	do {
1279 		ret = rte_intr_callback_unregister(intr_handle,
1280 				ngbe_dev_interrupt_handler, dev);
1281 		if (ret >= 0 || ret == -ENOENT) {
1282 			break;
1283 		} else if (ret != -EAGAIN) {
1284 			PMD_INIT_LOG(ERR,
1285 				"intr callback unregister failed: %d",
1286 				ret);
1287 		}
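		/*
		 * -EAGAIN means the callback is still executing on another
		 * lcore; back off and retry.
		 */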
1288 		rte_delay_ms(100);
1289 	} while (retries++ < (10 + NGBE_LINK_UP_TIME));
1290 
1291 	/* uninitialize PF if max_vfs not zero */
1292 	ngbe_pf_host_uninit(dev);
1293 
1294 	rte_free(dev->data->mac_addrs);
1295 	dev->data->mac_addrs = NULL;
1296 
1297 	rte_free(dev->data->hash_mac_addrs);
1298 	dev->data->hash_mac_addrs = NULL;
1299 
1300 	return ret;
1301 }
1302 
1303 /*
1304  * Reset PF device.
1305  */
1306 static int
1307 ngbe_dev_reset(struct rte_eth_dev *dev)
1308 {
1309 	int ret;
1310 
1311 	/* When a DPDK PMD PF begins to reset the PF port, it should notify
1312 	 * all its VFs so that they stay aligned with it. The notification
1313 	 * mechanism is PMD specific, and for the ngbe PF it is rather
1314 	 * complex. To avoid unexpected VF behavior, resetting the PF with
1315 	 * SR-IOV active is currently not supported; it may be added later.
1316 	 */
1317 	if (dev->data->sriov.active)
1318 		return -ENOTSUP;
1319 
1320 	ret = eth_ngbe_dev_uninit(dev);
1321 	if (ret != 0)
1322 		return ret;
1323 
1324 	ret = eth_ngbe_dev_init(dev, NULL);
1325 
1326 	return ret;
1327 }
1328 
1329 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
1330 	{                                                       \
1331 		uint32_t current_counter = rd32(hw, reg);       \
1332 		if (current_counter < last_counter)             \
1333 			current_counter += 0x100000000LL;       \
1334 		if (!hw->offset_loaded)                         \
1335 			last_counter = current_counter;         \
1336 		counter = current_counter - last_counter;       \
1337 		counter &= 0xFFFFFFFFLL;                        \
1338 	}
1339 
1340 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1341 	{                                                                \
1342 		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
1343 		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
1344 		uint64_t current_counter = (current_counter_msb << 32) | \
1345 			current_counter_lsb;                             \
1346 		if (current_counter < last_counter)                      \
1347 			current_counter += 0x1000000000LL;               \
1348 		if (!hw->offset_loaded)                                  \
1349 			last_counter = current_counter;                  \
1350 		counter = current_counter - last_counter;                \
1351 		counter &= 0xFFFFFFFFFLL;                                \
1352 	}
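
/*
 * Worked example (editor's note): if a 32-bit counter last read 0xFFFFFFF0
 * and now reads 0x00000010, current_counter is bumped by 2^32 before the
 * subtraction, so the computed delta is 0x20 instead of a huge underflow.
 */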
1353 
1354 void
1355 ngbe_read_stats_registers(struct ngbe_hw *hw,
1356 			   struct ngbe_hw_stats *hw_stats)
1357 {
1358 	unsigned int i;
1359 
1360 	/* QP Stats */
1361 	for (i = 0; i < hw->nb_rx_queues; i++) {
1362 		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
1363 			hw->qp_last[i].rx_qp_packets,
1364 			hw_stats->qp[i].rx_qp_packets);
1365 		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
1366 			hw->qp_last[i].rx_qp_bytes,
1367 			hw_stats->qp[i].rx_qp_bytes);
1368 		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
1369 			hw->qp_last[i].rx_qp_mc_packets,
1370 			hw_stats->qp[i].rx_qp_mc_packets);
1371 		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
1372 			hw->qp_last[i].rx_qp_bc_packets,
1373 			hw_stats->qp[i].rx_qp_bc_packets);
1374 	}
1375 
1376 	for (i = 0; i < hw->nb_tx_queues; i++) {
1377 		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
1378 			hw->qp_last[i].tx_qp_packets,
1379 			hw_stats->qp[i].tx_qp_packets);
1380 		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
1381 			hw->qp_last[i].tx_qp_bytes,
1382 			hw_stats->qp[i].tx_qp_bytes);
1383 		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
1384 			hw->qp_last[i].tx_qp_mc_packets,
1385 			hw_stats->qp[i].tx_qp_mc_packets);
1386 		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
1387 			hw->qp_last[i].tx_qp_bc_packets,
1388 			hw_stats->qp[i].tx_qp_bc_packets);
1389 	}
1390 
1391 	/* PB Stats */
1392 	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
1393 	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
1394 	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
1395 	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
1396 	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
1397 	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);
1398 
1399 	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
1400 	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);
1401 
1402 	/* DMA Stats */
1403 	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
1404 	hw_stats->tx_dma_drop += rd32(hw, NGBE_DMATXDROP);
1405 	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
1406 	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
1407 	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
1408 	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
1409 	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);
1410 
1411 	/* MAC Stats */
1412 	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
1413 	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
1414 	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);
1415 
1416 	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
1417 	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
1418 	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);
1419 
1420 	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
1421 	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);
1422 
1423 	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
1424 	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
1425 	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
1426 	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
1427 	hw_stats->rx_size_512_to_1023_packets +=
1428 			rd64(hw, NGBE_MACRX512TO1023L);
1429 	hw_stats->rx_size_1024_to_max_packets +=
1430 			rd64(hw, NGBE_MACRX1024TOMAXL);
1431 	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
1432 	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
1433 	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
1434 	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
1435 	hw_stats->tx_size_512_to_1023_packets +=
1436 			rd64(hw, NGBE_MACTX512TO1023L);
1437 	hw_stats->tx_size_1024_to_max_packets +=
1438 			rd64(hw, NGBE_MACTX1024TOMAXL);
1439 
1440 	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
1441 	hw_stats->rx_oversize_cnt += rd32(hw, NGBE_MACRXOVERSIZE);
1442 	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);
1443 
1444 	/* MNG Stats */
1445 	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
1446 	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
1447 	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
1448 	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);
1449 
1450 	/* MACsec Stats */
1451 	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
1452 	hw_stats->tx_macsec_pkts_encrypted +=
1453 			rd32(hw, NGBE_LSECTX_ENCPKT);
1454 	hw_stats->tx_macsec_pkts_protected +=
1455 			rd32(hw, NGBE_LSECTX_PROTPKT);
1456 	hw_stats->tx_macsec_octets_encrypted +=
1457 			rd32(hw, NGBE_LSECTX_ENCOCT);
1458 	hw_stats->tx_macsec_octets_protected +=
1459 			rd32(hw, NGBE_LSECTX_PROTOCT);
1460 	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
1461 	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
1462 	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
1463 	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
1464 	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
1465 	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
1466 	hw_stats->rx_macsec_sc_pkts_unchecked +=
1467 			rd32(hw, NGBE_LSECRX_UNCHKPKT);
1468 	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
1469 	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
1470 	for (i = 0; i < 2; i++) {
1471 		hw_stats->rx_macsec_sa_pkts_ok +=
1472 			rd32(hw, NGBE_LSECRX_OKPKT(i));
1473 		hw_stats->rx_macsec_sa_pkts_invalid +=
1474 			rd32(hw, NGBE_LSECRX_INVPKT(i));
1475 		hw_stats->rx_macsec_sa_pkts_notvalid +=
1476 			rd32(hw, NGBE_LSECRX_BADPKT(i));
1477 	}
1478 	for (i = 0; i < 4; i++) {
1479 		hw_stats->rx_macsec_sa_pkts_unusedsa +=
1480 			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
1481 		hw_stats->rx_macsec_sa_pkts_notusingsa +=
1482 			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
1483 	}
1484 	hw_stats->rx_total_missed_packets =
1485 			hw_stats->rx_up_dropped;
1486 }
1487 
1488 static int
1489 ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1490 {
1491 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1492 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1493 	struct ngbe_stat_mappings *stat_mappings =
1494 			NGBE_DEV_STAT_MAPPINGS(dev);
1495 	uint32_t i, j;
1496 
1497 	ngbe_read_stats_registers(hw, hw_stats);
1498 
1499 	if (stats == NULL)
1500 		return -EINVAL;
1501 
1502 	/* Fill out the rte_eth_stats statistics structure */
1503 	stats->ipackets = hw_stats->rx_packets;
1504 	stats->ibytes = hw_stats->rx_bytes;
1505 	stats->opackets = hw_stats->tx_packets;
1506 	stats->obytes = hw_stats->tx_bytes;
1507 
1508 	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
1509 	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
1510 	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
1511 	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
1512 	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
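	/*
	 * Each QSM register packs NB_QMAP_FIELDS_PER_QSM_REG 8-bit fields,
	 * each mapping a queue pair to one of the RTE_ETHDEV_QUEUE_STAT_CNTRS
	 * per-queue counters exposed through rte_eth_stats.
	 */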
1513 	for (i = 0; i < NGBE_MAX_QP; i++) {
1514 		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
1515 		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
1516 		uint32_t q_map;
1517 
1518 		q_map = (stat_mappings->rqsm[n] >> offset)
1519 				& QMAP_FIELD_RESERVED_BITS_MASK;
1520 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1521 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1522 		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1523 		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1524 
1525 		q_map = (stat_mappings->tqsm[n] >> offset)
1526 				& QMAP_FIELD_RESERVED_BITS_MASK;
1527 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1528 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1529 		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1530 		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
1531 	}
1532 
1533 	/* Rx Errors */
1534 	stats->imissed  = hw_stats->rx_total_missed_packets +
1535 			  hw_stats->rx_dma_drop;
1536 	stats->ierrors  = hw_stats->rx_crc_errors +
1537 			  hw_stats->rx_mac_short_packet_dropped +
1538 			  hw_stats->rx_length_errors +
1539 			  hw_stats->rx_undersize_errors +
1540 			  hw_stats->rdb_drp_cnt +
1541 			  hw_stats->rx_illegal_byte_errors +
1542 			  hw_stats->rx_error_bytes +
1543 			  hw_stats->rx_fragment_errors;
1544 
1545 	/* Tx Errors */
1546 	stats->oerrors  = 0;
1547 	return 0;
1548 }
1549 
1550 static int
1551 ngbe_dev_stats_reset(struct rte_eth_dev *dev)
1552 {
1553 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1554 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1555 
1556 	/* HW registers are cleared on read */
1557 	hw->offset_loaded = 0;
1558 	ngbe_dev_stats_get(dev, NULL);
1559 	hw->offset_loaded = 1;
1560 
1561 	/* Reset software totals */
1562 	memset(hw_stats, 0, sizeof(*hw_stats));
1563 
1564 	return 0;
1565 }
1566 
1567 /* This function calculates the number of xstats based on the current config */
1568 static unsigned
1569 ngbe_xstats_calc_num(struct rte_eth_dev *dev)
1570 {
1571 	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1572 	return NGBE_NB_HW_STATS +
1573 	       NGBE_NB_QP_STATS * nb_queues;
1574 }
1575 
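/*
 * xstats ids are laid out as [0, NGBE_NB_HW_STATS) for the hw stats above,
 * followed by NGBE_NB_QP_STATS entries per queue for up to NGBE_MAX_QP
 * queues.
 */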
1576 static inline int
1577 ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
1578 {
1579 	int nb, st;
1580 
1581 	/* Extended stats from ngbe_hw_stats */
1582 	if (id < NGBE_NB_HW_STATS) {
1583 		snprintf(name, size, "[hw]%s",
1584 			rte_ngbe_stats_strings[id].name);
1585 		return 0;
1586 	}
1587 	id -= NGBE_NB_HW_STATS;
1588 
1589 	/* Queue Stats */
1590 	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1591 		nb = id / NGBE_NB_QP_STATS;
1592 		st = id % NGBE_NB_QP_STATS;
1593 		snprintf(name, size, "[q%u]%s", nb,
1594 			rte_ngbe_qp_strings[st].name);
1595 		return 0;
1596 	}
1597 	id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;
1598 
1599 	return -(int)(id + 1);
1600 }
1601 
1602 static inline int
1603 ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
1604 {
1605 	int nb, st;
1606 
1607 	/* Extended stats from ngbe_hw_stats */
1608 	if (id < NGBE_NB_HW_STATS) {
1609 		*offset = rte_ngbe_stats_strings[id].offset;
1610 		return 0;
1611 	}
1612 	id -= NGBE_NB_HW_STATS;
1613 
1614 	/* Queue Stats */
1615 	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
1616 		nb = id / NGBE_NB_QP_STATS;
1617 		st = id % NGBE_NB_QP_STATS;
1618 		*offset = rte_ngbe_qp_strings[st].offset +
1619 			nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
1620 		return 0;
1621 	}
1622 
1623 	return -1;
1624 }
1625 
1626 static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
1627 	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
1628 {
1629 	unsigned int i, count;
1630 
1631 	count = ngbe_xstats_calc_num(dev);
1632 	if (xstats_names == NULL)
1633 		return count;
1634 
1635 	/* Note: limit >= cnt_stats checked upstream
1636 	 * in rte_eth_xstats_names()
1637 	 */
1638 	limit = min(limit, count);
1639 
1640 	/* Extended stats from ngbe_hw_stats */
1641 	for (i = 0; i < limit; i++) {
1642 		if (ngbe_get_name_by_id(i, xstats_names[i].name,
1643 			sizeof(xstats_names[i].name))) {
1644 			PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1645 			break;
1646 		}
1647 	}
1648 
1649 	return i;
1650 }
1651 
1652 static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1653 	const uint64_t *ids,
1654 	struct rte_eth_xstat_name *xstats_names,
1655 	unsigned int limit)
1656 {
1657 	unsigned int i;
1658 
1659 	if (ids == NULL)
1660 		return ngbe_dev_xstats_get_names(dev, xstats_names, limit);
1661 
1662 	for (i = 0; i < limit; i++) {
1663 		if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
1664 				sizeof(xstats_names[i].name))) {
1665 			PMD_INIT_LOG(WARNING, "invalid id at ids[%u]", i);
1666 			return -1;
1667 		}
1668 	}
1669 
1670 	return i;
1671 }
1672 
1673 static int
1674 ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1675 					 unsigned int limit)
1676 {
1677 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1678 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1679 	unsigned int i, count;
1680 
1681 	ngbe_read_stats_registers(hw, hw_stats);
1682 
1683 	/* If this is called from a reset, xstats is NULL and the registers
1684 	 * have already been cleared by reading them.
1685 	 */
1686 	count = ngbe_xstats_calc_num(dev);
1687 	if (xstats == NULL)
1688 		return count;
1689 
1690 	limit = min(limit, ngbe_xstats_calc_num(dev));
1691 
1692 	/* Extended stats from ngbe_hw_stats */
1693 	for (i = 0; i < limit; i++) {
1694 		uint32_t offset = 0;
1695 
1696 		if (ngbe_get_offset_by_id(i, &offset)) {
1697 			PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1698 			break;
1699 		}
1700 		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
1701 		xstats[i].id = i;
1702 	}
1703 
1704 	return i;
1705 }
1706 
1707 static int
1708 ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
1709 					 unsigned int limit)
1710 {
1711 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1712 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1713 	unsigned int i, count;
1714 
1715 	ngbe_read_stats_registers(hw, hw_stats);
1716 
1717 	/* If this is called from a reset, values is NULL and the registers
1718 	 * have already been cleared by reading them.
1719 	 */
1720 	count = ngbe_xstats_calc_num(dev);
1721 	if (values == NULL)
1722 		return count;
1723 
1724 	limit = min(limit, ngbe_xstats_calc_num(dev));
1725 
1726 	/* Extended stats from ngbe_hw_stats */
1727 	for (i = 0; i < limit; i++) {
1728 		uint32_t offset;
1729 
1730 		if (ngbe_get_offset_by_id(i, &offset)) {
1731 			PMD_INIT_LOG(WARNING, "id value %u isn't valid", i);
1732 			break;
1733 		}
1734 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1735 	}
1736 
1737 	return i;
1738 }
1739 
1740 static int
1741 ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1742 		uint64_t *values, unsigned int limit)
1743 {
1744 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1745 	unsigned int i;
1746 
1747 	if (ids == NULL)
1748 		return ngbe_dev_xstats_get_(dev, values, limit);
1749 
1750 	for (i = 0; i < limit; i++) {
1751 		uint32_t offset;
1752 
1753 		if (ngbe_get_offset_by_id(ids[i], &offset)) {
1754 			PMD_INIT_LOG(WARNING, "invalid id at ids[%u]", i);
1755 			break;
1756 		}
1757 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
1758 	}
1759 
1760 	return i;
1761 }
1762 
1763 static int
1764 ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
1765 {
1766 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1767 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1768 
1769 	/* HW registers are cleared on read */
1770 	hw->offset_loaded = 0;
1771 	ngbe_read_stats_registers(hw, hw_stats);
1772 	hw->offset_loaded = 1;
1773 
1774 	/* Reset software totals */
1775 	memset(hw_stats, 0, sizeof(*hw_stats));
1776 
1777 	return 0;
1778 }
1779 
1780 static int
1781 ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1782 {
1783 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1784 	int ret;
1785 
1786 	ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
1787 
1788 	if (ret < 0)
1789 		return -EINVAL;
1790 
1791 	ret += 1; /* add the size of '\0' */
1792 	if (fw_size < (size_t)ret)
1793 		return ret;
1794 
1795 	return 0;
1796 }
1797 
1798 static int
1799 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1800 {
1801 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1802 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1803 
1804 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1805 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1806 	dev_info->min_rx_bufsize = 1024;
1807 	dev_info->max_rx_pktlen = 15872;
1808 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1809 	dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
1810 	dev_info->max_vfs = pci_dev->max_vfs;
1811 	dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
1812 	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
1813 				     dev_info->rx_queue_offload_capa);
1814 	dev_info->tx_queue_offload_capa = 0;
1815 	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
1816 
1817 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1818 		.rx_thresh = {
1819 			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
1820 			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
1821 			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
1822 		},
1823 		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
1824 		.rx_drop_en = 0,
1825 		.offloads = 0,
1826 	};
1827 
1828 	dev_info->default_txconf = (struct rte_eth_txconf) {
1829 		.tx_thresh = {
1830 			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
1831 			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
1832 			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
1833 		},
1834 		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1835 		.offloads = 0,
1836 	};
1837 
1838 	dev_info->rx_desc_lim = rx_desc_lim;
1839 	dev_info->tx_desc_lim = tx_desc_lim;
1840 
1841 	dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
1842 	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
1843 	dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;
1844 
1845 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1846 				RTE_ETH_LINK_SPEED_10M;
1847 
1848 	/* Driver-preferred Rx/Tx parameters */
1849 	dev_info->default_rxportconf.burst_size = 32;
1850 	dev_info->default_txportconf.burst_size = 32;
1851 	dev_info->default_rxportconf.nb_queues = 1;
1852 	dev_info->default_txportconf.nb_queues = 1;
1853 	dev_info->default_rxportconf.ring_size = 256;
1854 	dev_info->default_txportconf.ring_size = 256;
1855 
1856 	return 0;
1857 }
1858 
1859 const uint32_t *
1860 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1861 {
1862 	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1863 	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
1864 	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
1865 	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1866 		return ngbe_get_supported_ptypes();
1867 
1868 	return NULL;
1869 }
1870 
1871 static void
1872 ngbe_dev_overheat(struct rte_eth_dev *dev)
1873 {
1874 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1875 	s32 temp_state;
1876 
1877 	temp_state = hw->mac.check_overtemp(hw);
1878 	if (!temp_state)
1879 		return;
1880 
1881 	if (temp_state == NGBE_ERR_UNDERTEMP) {
1882 		PMD_DRV_LOG(CRIT, "Network adapter has been restarted, "
1883 			"since the temperature has returned to normal.");
1884 		wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, NGBE_PBRXCTL_ENA);
1885 		ngbe_dev_set_link_up(dev);
1886 	} else if (temp_state == NGBE_ERR_OVERTEMP) {
1887 		PMD_DRV_LOG(CRIT, "Network adapter has been stopped because it has overheated.");
1888 		wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0);
1889 		ngbe_dev_set_link_down(dev);
1890 	}
1891 }
1892 
1893 /* return 0 means link status changed, -1 means not changed */
1894 int
1895 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1896 			    int wait_to_complete)
1897 {
1898 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1899 	struct rte_eth_link link;
1900 	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1901 	u32 lan_speed = 0;
1902 	bool link_up;
1903 	int err;
1904 	int wait = 1;
1905 
1906 	memset(&link, 0, sizeof(link));
1907 	link.link_status = RTE_ETH_LINK_DOWN;
1908 	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1909 	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1910 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1911 			~RTE_ETH_LINK_SPEED_AUTONEG);
1912 
1913 	hw->mac.get_link_status = true;
1914 
1915 	/* skip the wait if no wait was requested or the LSC interrupt is enabled */
1916 	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1917 		wait = 0;
1918 
1919 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1920 	if (err != 0) {
1921 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1922 		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1923 		return rte_eth_linkstatus_set(dev, &link);
1924 	}
1925 
1926 	if (!link_up)
1927 		return rte_eth_linkstatus_set(dev, &link);
1928 
1929 	link.link_status = RTE_ETH_LINK_UP;
1930 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1931 
1932 	switch (link_speed) {
1933 	default:
1934 	case NGBE_LINK_SPEED_UNKNOWN:
1935 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1936 		break;
1937 
1938 	case NGBE_LINK_SPEED_10M_FULL:
1939 		link.link_speed = RTE_ETH_SPEED_NUM_10M;
1940 		lan_speed = 0;
1941 		break;
1942 
1943 	case NGBE_LINK_SPEED_100M_FULL:
1944 		link.link_speed = RTE_ETH_SPEED_NUM_100M;
1945 		lan_speed = 1;
1946 		break;
1947 
1948 	case NGBE_LINK_SPEED_1GB_FULL:
1949 		link.link_speed = RTE_ETH_SPEED_NUM_1G;
1950 		lan_speed = 2;
1951 		break;
1952 	}
1953 
1954 	if (hw->is_pf) {
1955 		wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1956 		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1957 				NGBE_LINK_SPEED_100M_FULL |
1958 				NGBE_LINK_SPEED_10M_FULL)) {
1959 			wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1960 				NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1961 		}
1962 	}
1963 
1964 	return rte_eth_linkstatus_set(dev, &link);
1965 }
1966 
1967 static int
1968 ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1969 {
1970 	return ngbe_dev_link_update_share(dev, wait_to_complete);
1971 }
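
/*
 * Illustrative usage sketch, not part of the driver: both generic link
 * queries land in ngbe_dev_link_update_share(); rte_eth_link_get()
 * passes wait_to_complete = 1 and the _nowait variant passes 0.
 *
 *	struct rte_eth_link link;
 *	rte_eth_link_get_nowait(port_id, &link);
 */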
1972 
1973 static int
1974 ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1975 {
1976 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1977 	uint32_t fctrl;
1978 
1979 	fctrl = rd32(hw, NGBE_PSRCTL);
1980 	fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
1981 	wr32(hw, NGBE_PSRCTL, fctrl);
1982 
1983 	return 0;
1984 }
1985 
1986 static int
1987 ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
1988 {
1989 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1990 	uint32_t fctrl;
1991 
1992 	fctrl = rd32(hw, NGBE_PSRCTL);
1993 	fctrl &= (~NGBE_PSRCTL_UCP);
1994 	if (dev->data->all_multicast == 1)
1995 		fctrl |= NGBE_PSRCTL_MCP;
1996 	else
1997 		fctrl &= (~NGBE_PSRCTL_MCP);
1998 	wr32(hw, NGBE_PSRCTL, fctrl);
1999 
2000 	return 0;
2001 }
2002 
2003 static int
2004 ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2005 {
2006 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2007 	uint32_t fctrl;
2008 
2009 	fctrl = rd32(hw, NGBE_PSRCTL);
2010 	fctrl |= NGBE_PSRCTL_MCP;
2011 	wr32(hw, NGBE_PSRCTL, fctrl);
2012 
2013 	return 0;
2014 }
2015 
2016 static int
2017 ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2018 {
2019 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2020 	uint32_t fctrl;
2021 
2022 	if (dev->data->promiscuous == 1)
2023 		return 0; /* must remain in all_multicast mode */
2024 
2025 	fctrl = rd32(hw, NGBE_PSRCTL);
2026 	fctrl &= (~NGBE_PSRCTL_MCP);
2027 	wr32(hw, NGBE_PSRCTL, fctrl);
2028 
2029 	return 0;
2030 }
2031 
2032 /**
2033  * It clears the interrupt causes and enables the interrupt.
2034  * It is called only once during NIC initialization.
2035  *
2036  * @param dev
2037  *  Pointer to struct rte_eth_dev.
2038  * @param on
2039  *  Enable or Disable.
2040  *
2041  * @return
2042  *  - On success, zero.
2043  *  - On failure, a negative value.
2044  */
2045 static int
2046 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2047 {
2048 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2049 
2050 	ngbe_dev_link_status_print(dev);
2051 	if (on != 0) {
2052 		intr->mask_misc |= NGBE_ICRMISC_PHY;
2053 		intr->mask_misc |= NGBE_ICRMISC_GPIO;
2054 	} else {
2055 		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
2056 		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
2057 	}
2058 
2059 	return 0;
2060 }
2061 
2062 /**
2063  * It clears the interrupt causes and enables the interrupt.
2064  * It is called only once during NIC initialization.
2065  *
2066  * @param dev
2067  *  Pointer to struct rte_eth_dev.
2068  *
2069  * @return
2070  *  - On success, zero.
2071  *  - On failure, a negative value.
2072  */
2073 static int
2074 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
2075 {
2076 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2077 	u64 mask;
2078 
2079 	mask = NGBE_ICR_MASK;
2080 	mask &= (1ULL << NGBE_MISC_VEC_ID);
2081 	intr->mask |= mask;
2082 	intr->mask_misc |= NGBE_ICRMISC_GPIO;
2083 
2084 	return 0;
2085 }
2086 
2087 /**
2088  * It clears the interrupt causes and enables the interrupt.
2089  * It is called only once during NIC initialization.
2090  *
2091  * @param dev
2092  *  Pointer to struct rte_eth_dev.
2093  *
2094  * @return
2095  *  - On success, zero.
2096  *  - On failure, a negative value.
2097  */
2098 static int
2099 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2100 {
2101 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2102 	u64 mask;
2103 
2104 	mask = NGBE_ICR_MASK;
2105 	mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
2106 	intr->mask |= mask;
2107 
2108 	return 0;
2109 }
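
/*
 * Worked example, assuming NGBE_RX_VEC_START == 1: ((1ULL << 1) - 1) is
 * 0x1, so the negation keeps every cause bit except bit 0 (the misc
 * vector) in the mask, enabling all Rx queue vectors.
 */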
2110 
2111 /**
2112  * It clears the interrupt causes and enables the interrupt.
2113  * It is called only once during NIC initialization.
2114  *
2115  * @param dev
2116  *  Pointer to struct rte_eth_dev.
2117  *
2118  * @return
2119  *  - On success, zero.
2120  *  - On failure, a negative value.
2121  */
2122 static int
2123 ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2124 {
2125 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2126 
2127 	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
2128 
2129 	return 0;
2130 }
2131 
2132 /*
2133  * It reads ICR and sets the flags for link_update.
2134  *
2135  * @param dev
2136  *  Pointer to struct rte_eth_dev.
2137  *
2138  * @return
2139  *  - On success, zero.
2140  *  - On failure, a negative value.
2141  */
2142 static int
2143 ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2144 {
2145 	uint32_t eicr;
2146 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2147 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2148 
2149 	/* read the clear-on-read NIC registers here */
2150 	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2151 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2152 
2153 	intr->flags = 0;
2154 
2155 	/* set flag for async link update */
2156 	if (eicr & NGBE_ICRMISC_PHY)
2157 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2158 
2159 	if (eicr & NGBE_ICRMISC_VFMBX)
2160 		intr->flags |= NGBE_FLAG_MAILBOX;
2161 
2162 	if (eicr & NGBE_ICRMISC_LNKSEC)
2163 		intr->flags |= NGBE_FLAG_MACSEC;
2164 
2165 	if (eicr & NGBE_ICRMISC_GPIO)
2166 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2167 
2168 	if (eicr & NGBE_ICRMISC_HEAT)
2169 		intr->flags |= NGBE_FLAG_OVERHEAT;
2170 
2171 	((u32 *)hw->isb_mem)[NGBE_ISB_MISC] = 0;
2172 
2173 	return 0;
2174 }
2175 
2176 /**
2177  * It gets and then prints the link status.
2178  *
2179  * @param dev
2180  *  Pointer to struct rte_eth_dev.
2185  */
2186 static void
2187 ngbe_dev_link_status_print(struct rte_eth_dev *dev)
2188 {
2189 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2190 	struct rte_eth_link link;
2191 
2192 	rte_eth_linkstatus_get(dev, &link);
2193 
2194 	if (link.link_status == RTE_ETH_LINK_UP) {
2195 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2196 					(int)(dev->data->port_id),
2197 					(unsigned int)link.link_speed,
2198 			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2199 					"full-duplex" : "half-duplex");
2200 	} else {
2201 		PMD_INIT_LOG(INFO, "Port %d: Link Down",
2202 				(int)(dev->data->port_id));
2203 	}
2204 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2205 				pci_dev->addr.domain,
2206 				pci_dev->addr.bus,
2207 				pci_dev->addr.devid,
2208 				pci_dev->addr.function);
2209 }
2210 
2211 /*
2212  * It executes link_update after an interrupt has occurred.
2213  *
2214  * @param dev
2215  *  Pointer to struct rte_eth_dev.
2216  *
2217  * @return
2218  *  - On success, zero.
2219  *  - On failure, a negative value.
2220  */
2221 static int
2222 ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2223 {
2224 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2225 
2226 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2227 
2228 	if (intr->flags & NGBE_FLAG_MAILBOX) {
2229 		ngbe_pf_mbx_process(dev);
2230 		intr->flags &= ~NGBE_FLAG_MAILBOX;
2231 	}
2232 
2233 	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2234 		struct rte_eth_link link;
2235 
2236 		/* get the link status before link update, to detect changes later */
2237 		rte_eth_linkstatus_get(dev, &link);
2238 
2239 		ngbe_dev_link_update(dev, 0);
2240 		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2241 		ngbe_dev_link_status_print(dev);
2242 		if (dev->data->dev_link.link_speed != link.link_speed)
2243 			rte_eth_dev_callback_process(dev,
2244 				RTE_ETH_EVENT_INTR_LSC, NULL);
2245 	}
2246 
2247 	if (intr->flags & NGBE_FLAG_OVERHEAT) {
2248 		ngbe_dev_overheat(dev);
2249 		intr->flags &= ~NGBE_FLAG_OVERHEAT;
2250 	}
2251 
2252 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
2253 	ngbe_enable_intr(dev);
2254 
2255 	return 0;
2256 }
2257 
2258 /**
2259  * Interrupt handler triggered by the NIC for handling a
2260  * specific interrupt.
2261  *
2262  * @param param
2263  *  The address of parameter (struct rte_eth_dev *) registered before.
2264  */
2265 static void
2266 ngbe_dev_interrupt_handler(void *param)
2267 {
2268 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2269 
2270 	ngbe_dev_interrupt_get_status(dev);
2271 	ngbe_dev_interrupt_action(dev);
2272 }
2273 
2274 static int
2275 ngbe_dev_led_on(struct rte_eth_dev *dev)
2276 {
2277 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2278 	return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP;
2279 }
2280 
2281 static int
2282 ngbe_dev_led_off(struct rte_eth_dev *dev)
2283 {
2284 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2285 	return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP;
2286 }
2287 
2288 static int
2289 ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2290 {
2291 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2292 	uint32_t mflcn_reg;
2293 	uint32_t fccfg_reg;
2294 	int rx_pause;
2295 	int tx_pause;
2296 
2297 	fc_conf->pause_time = hw->fc.pause_time;
2298 	fc_conf->high_water = hw->fc.high_water;
2299 	fc_conf->low_water = hw->fc.low_water;
2300 	fc_conf->send_xon = hw->fc.send_xon;
2301 	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2302 
2303 	/*
2304 	 * Return rx_pause status according to actual setting of
2305 	 * RXFCCFG register.
2306 	 */
2307 	mflcn_reg = rd32(hw, NGBE_RXFCCFG);
2308 	if (mflcn_reg & NGBE_RXFCCFG_FC)
2309 		rx_pause = 1;
2310 	else
2311 		rx_pause = 0;
2312 
2313 	/*
2314 	 * Return tx_pause status according to actual setting of
2315 	 * TXFCCFG register.
2316 	 */
2317 	fccfg_reg = rd32(hw, NGBE_TXFCCFG);
2318 	if (fccfg_reg & NGBE_TXFCCFG_FC)
2319 		tx_pause = 1;
2320 	else
2321 		tx_pause = 0;
2322 
2323 	if (rx_pause && tx_pause)
2324 		fc_conf->mode = RTE_ETH_FC_FULL;
2325 	else if (rx_pause)
2326 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2327 	else if (tx_pause)
2328 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2329 	else
2330 		fc_conf->mode = RTE_ETH_FC_NONE;
2331 
2332 	return 0;
2333 }
2334 
2335 static int
2336 ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2337 {
2338 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2339 	int err;
2340 	uint32_t rx_buf_size;
2341 	uint32_t max_high_water;
2342 	enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = {
2343 		ngbe_fc_none,
2344 		ngbe_fc_rx_pause,
2345 		ngbe_fc_tx_pause,
2346 		ngbe_fc_full
2347 	};
2348 
2349 	PMD_INIT_FUNC_TRACE();
2350 
2351 	rx_buf_size = rd32(hw, NGBE_PBRXSIZE);
2352 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2353 
2354 	/*
2355 	 * Reserve at least one Ethernet frame for the watermark;
2356 	 * high_water/low_water are in kilobytes for ngbe.
2357 	 */
2358 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
2359 	if (fc_conf->high_water > max_high_water ||
2360 	    fc_conf->high_water < fc_conf->low_water) {
2361 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2362 		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
2363 		return -EINVAL;
2364 	}
2365 
2366 	hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode];
2367 	hw->fc.pause_time     = fc_conf->pause_time;
2368 	hw->fc.high_water     = fc_conf->high_water;
2369 	hw->fc.low_water      = fc_conf->low_water;
2370 	hw->fc.send_xon       = fc_conf->send_xon;
2371 	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
2372 
2373 	err = hw->mac.fc_enable(hw);
2374 
2375 	/* Not negotiated is not an error case */
2376 	if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) {
2377 		wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK,
2378 		      (fc_conf->mac_ctrl_frame_fwd
2379 		       ? NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP));
2380 		ngbe_flush(hw);
2381 
2382 		return 0;
2383 	}
2384 
2385 	PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err);
2386 	return -EIO;
2387 }
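
/*
 * Illustrative usage sketch, not part of the driver: requesting full
 * flow control from an application; the watermarks are hypothetical and
 * must pass the high/low check above (values in KB).
 *
 *	struct rte_eth_fc_conf fc;
 *
 *	rte_eth_dev_flow_ctrl_get(port_id, &fc);
 *	fc.mode = RTE_ETH_FC_FULL;
 *	fc.low_water = 1;
 *	fc.high_water = 3;
 *	rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */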
2388 
2389 int
2390 ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2391 			  struct rte_eth_rss_reta_entry64 *reta_conf,
2392 			  uint16_t reta_size)
2393 {
2394 	uint8_t i, j, mask;
2395 	uint32_t reta;
2396 	uint16_t idx, shift;
2397 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2398 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2399 
2400 	PMD_INIT_FUNC_TRACE();
2401 
2402 	if (!hw->is_pf) {
2403 		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
2404 			"NIC.");
2405 		return -ENOTSUP;
2406 	}
2407 
2408 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2409 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2410 			"(%d) doesn't match the number the hardware can support "
2411 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2412 		return -EINVAL;
2413 	}
2414 
2415 	for (i = 0; i < reta_size; i += 4) {
2416 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2417 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2418 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2419 		if (!mask)
2420 			continue;
2421 
2422 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2423 		for (j = 0; j < 4; j++) {
2424 			if (RS8(mask, j, 0x1)) {
2425 				reta  &= ~(MS32(8 * j, 0xFF));
2426 				reta |= LS32(reta_conf[idx].reta[shift + j],
2427 						8 * j, 0xFF);
2428 			}
2429 		}
2430 		wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
2431 	}
2432 	adapter->rss_reta_updated = 1;
2433 
2434 	return 0;
2435 }
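
/*
 * Illustrative usage sketch, not part of the driver: spreading the
 * 128-entry redirection table across two queues via the generic API.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[2];
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < 128; i++) {
 *		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
 *		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE]
 *			.reta[i % RTE_ETH_RETA_GROUP_SIZE] = i % 2;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 */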
2436 
2437 int
2438 ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2439 			 struct rte_eth_rss_reta_entry64 *reta_conf,
2440 			 uint16_t reta_size)
2441 {
2442 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2443 	uint8_t i, j, mask;
2444 	uint32_t reta;
2445 	uint16_t idx, shift;
2446 
2447 	PMD_INIT_FUNC_TRACE();
2448 
2449 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
2450 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2451 			"(%d) doesn't match the number the hardware can support "
2452 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
2453 		return -EINVAL;
2454 	}
2455 
2456 	for (i = 0; i < reta_size; i += 4) {
2457 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2458 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2459 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
2460 		if (!mask)
2461 			continue;
2462 
2463 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
2464 		for (j = 0; j < 4; j++) {
2465 			if (RS8(mask, j, 0x1))
2466 				reta_conf[idx].reta[shift + j] =
2467 					(uint16_t)RS32(reta, 8 * j, 0xFF);
2468 		}
2469 	}
2470 
2471 	return 0;
2472 }
2473 
2474 static int
2475 ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2476 				uint32_t index, uint32_t pool)
2477 {
2478 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2479 	uint32_t enable_addr = 1;
2480 
2481 	return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2482 			     pool, enable_addr);
2483 }
2484 
2485 static void
2486 ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2487 {
2488 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2489 
2490 	ngbe_clear_rar(hw, index);
2491 }
2492 
2493 static int
2494 ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2495 {
2496 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2497 
2498 	ngbe_remove_rar(dev, 0);
2499 	ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2500 
2501 	return 0;
2502 }
2503 
2504 static int
2505 ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2506 {
2507 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2508 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
2509 	struct rte_eth_dev_data *dev_data = dev->data;
2510 
2511 	/* If the device is started, refuse an MTU that requires scattered
2512 	 * Rx when that feature has not been enabled beforehand.
2513 	 */
2514 	if (dev_data->dev_started && !dev_data->scattered_rx &&
2515 	    (frame_size + 2 * RTE_VLAN_HLEN >
2516 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2517 		PMD_INIT_LOG(ERR, "Stop port first.");
2518 		return -EINVAL;
2519 	}
2520 
2521 	wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
2522 		NGBE_FRMSZ_MAX(frame_size));
2523 
2524 	return 0;
2525 }
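
/*
 * For example, the default MTU of 1500 programs a maximum frame size of
 * 1500 + RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) = 1518 bytes
 * into NGBE_FRMSZ.
 */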
2526 
2527 static uint32_t
2528 ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2529 {
2530 	uint32_t vector = 0;
2531 
2532 	switch (hw->mac.mc_filter_type) {
2533 	case 0:   /* use bits [47:36] of the address */
2534 		vector = ((uc_addr->addr_bytes[4] >> 4) |
2535 			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
2536 		break;
2537 	case 1:   /* use bits [46:35] of the address */
2538 		vector = ((uc_addr->addr_bytes[4] >> 3) |
2539 			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
2540 		break;
2541 	case 2:   /* use bits [45:34] of the address */
2542 		vector = ((uc_addr->addr_bytes[4] >> 2) |
2543 			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
2544 		break;
2545 	case 3:   /* use bits [43:32] of the address */
2546 		vector = ((uc_addr->addr_bytes[4]) |
2547 			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
2548 		break;
2549 	default:  /* Invalid mc_filter_type */
2550 		break;
2551 	}
2552 
2553 	/* the vector is only 12 bits wide; mask it to stay in bounds */
2554 	vector &= 0xFFF;
2555 	return vector;
2556 }
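
/*
 * Worked example with a hypothetical address: for mc_filter_type == 0,
 * addr_bytes[4] == 0x12 and addr_bytes[5] == 0x34, the vector is
 * (0x12 >> 4) | (0x34 << 4) = 0x001 | 0x340 = 0x341, already inside the
 * 12-bit range.
 */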
2557 
2558 static int
2559 ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2560 			struct rte_ether_addr *mac_addr, uint8_t on)
2561 {
2562 	uint32_t vector;
2563 	uint32_t uta_idx;
2564 	uint32_t reg_val;
2565 	uint32_t uta_mask;
2566 	uint32_t psrctl;
2567 
2568 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2569 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2570 
2571 	vector = ngbe_uta_vector(hw, mac_addr);
2572 	uta_idx = (vector >> 5) & 0x7F;
2573 	uta_mask = 0x1UL << (vector & 0x1F);
2574 
2575 	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2576 		return 0;
2577 
2578 	reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2579 	if (on) {
2580 		uta_info->uta_in_use++;
2581 		reg_val |= uta_mask;
2582 		uta_info->uta_shadow[uta_idx] |= uta_mask;
2583 	} else {
2584 		uta_info->uta_in_use--;
2585 		reg_val &= ~uta_mask;
2586 		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2587 	}
2588 
2589 	wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2590 
2591 	psrctl = rd32(hw, NGBE_PSRCTL);
2592 	if (uta_info->uta_in_use > 0)
2593 		psrctl |= NGBE_PSRCTL_UCHFENA;
2594 	else
2595 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2596 
2597 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2598 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2599 	wr32(hw, NGBE_PSRCTL, psrctl);
2600 
2601 	return 0;
2602 }
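
/*
 * Continuing the worked example above: vector 0x341 selects table word
 * uta_idx = (0x341 >> 5) & 0x7F = 26 and, within that word, bit
 * uta_mask = 1 << (0x341 & 0x1F) = 0x2.
 */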
2603 
2604 static int
2605 ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2606 {
2607 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2608 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2609 	uint32_t psrctl;
2610 	int i;
2611 
2612 	if (on) {
2613 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2614 			uta_info->uta_shadow[i] = ~0;
2615 			wr32(hw, NGBE_UCADDRTBL(i), ~0);
2616 		}
2617 	} else {
2618 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2619 			uta_info->uta_shadow[i] = 0;
2620 			wr32(hw, NGBE_UCADDRTBL(i), 0);
2621 		}
2622 	}
2623 
2624 	psrctl = rd32(hw, NGBE_PSRCTL);
2625 	if (on)
2626 		psrctl |= NGBE_PSRCTL_UCHFENA;
2627 	else
2628 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2629 
2630 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2631 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2632 	wr32(hw, NGBE_PSRCTL, psrctl);
2633 
2634 	return 0;
2635 }
2636 
2637 /**
2638  * Set the IVAR registers, mapping interrupt causes to vectors
2639  * @param hw
2640  *  pointer to ngbe_hw struct
2641  * @param direction
2642  *  0 for Rx, 1 for Tx, -1 for other causes
2643  * @param queue
2644  *  queue to map the corresponding interrupt to
2645  * @param msix_vector
2646  *  the vector to map to the corresponding queue
2647  */
2648 void
2649 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
2650 		   uint8_t queue, uint8_t msix_vector)
2651 {
2652 	uint32_t tmp, idx;
2653 
2654 	if (direction == -1) {
2655 		/* other causes */
2656 		msix_vector |= NGBE_IVARMISC_VLD;
2657 		idx = 0;
2658 		tmp = rd32(hw, NGBE_IVARMISC);
2659 		tmp &= ~(0xFF << idx);
2660 		tmp |= (msix_vector << idx);
2661 		wr32(hw, NGBE_IVARMISC, tmp);
2662 	} else {
2663 		/* rx or tx causes */
2664 		/* Workaround for ICR lost */
2665 		idx = ((16 * (queue & 1)) + (8 * direction));
2666 		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
2667 		tmp &= ~(0xFF << idx);
2668 		tmp |= (msix_vector << idx);
2669 		wr32(hw, NGBE_IVAR(queue >> 1), tmp);
2670 	}
2671 }
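
/*
 * Worked example: queues are packed two per IVAR register, with the Tx
 * field 8 bits above the Rx field. For Rx queue 3 (direction 0) the
 * field offset is 16 * (3 & 1) + 8 * 0 = 16, i.e. bits 23:16 of
 * NGBE_IVAR(1).
 */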
2672 
2673 /**
2674  * Sets up the hardware to properly generate MSI-X interrupts
2675  * @param dev
2676  *  pointer to the rte_eth_dev structure
2677  */
2678 static void
2679 ngbe_configure_msix(struct rte_eth_dev *dev)
2680 {
2681 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2682 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2683 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2684 	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
2685 	uint32_t vec = NGBE_MISC_VEC_ID;
2686 	uint32_t gpie;
2687 
2688 	/*
2689 	 * Don't configure the MSI-X registers if no mapping has been done
2690 	 * between interrupt vectors and event fds. But if MSI-X has already
2691 	 * been enabled, auto clean, auto mask and throttling still need to
2692 	 * be configured.
2693 	 */
2694 	gpie = rd32(hw, NGBE_GPIE);
2695 	if (!rte_intr_dp_is_en(intr_handle) &&
2696 	    !(gpie & NGBE_GPIE_MSIX))
2697 		return;
2698 
2699 	if (rte_intr_allow_others(intr_handle)) {
2700 		base = NGBE_RX_VEC_START;
2701 		vec = base;
2702 	}
2703 
2704 	/* setup GPIE for MSI-X mode */
2705 	gpie = rd32(hw, NGBE_GPIE);
2706 	gpie |= NGBE_GPIE_MSIX;
2707 	wr32(hw, NGBE_GPIE, gpie);
2708 
2709 	/* Populate the IVAR table and set the ITR values to the
2710 	 * corresponding register.
2711 	 */
2712 	if (rte_intr_dp_is_en(intr_handle)) {
2713 		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
2714 			queue_id++) {
2715 			/* by default, 1:1 mapping */
2716 			ngbe_set_ivar_map(hw, 0, queue_id, vec);
2717 			rte_intr_vec_list_index_set(intr_handle,
2718 							   queue_id, vec);
2719 			if (vec < base + rte_intr_nb_efd_get(intr_handle)
2720 			    - 1)
2721 				vec++;
2722 		}
2723 
2724 		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
2725 	}
2726 	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
2727 			NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
2728 			| NGBE_ITR_WRDSA);
2729 }
2730 
2731 static u8 *
2732 ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2733 			u8 **mc_addr_ptr, u32 *vmdq)
2734 {
2735 	u8 *mc_addr;
2736 
2737 	*vmdq = 0;
2738 	mc_addr = *mc_addr_ptr;
2739 	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2740 	return mc_addr;
2741 }
2742 
2743 int
2744 ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2745 			  struct rte_ether_addr *mc_addr_set,
2746 			  uint32_t nb_mc_addr)
2747 {
2748 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2749 	u8 *mc_addr_list;
2750 
2751 	mc_addr_list = (u8 *)mc_addr_set;
2752 	return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2753 					 ngbe_dev_addr_list_itr, TRUE);
2754 }
2755 
2756 static uint64_t
2757 ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
2758 {
2759 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2760 	uint64_t systime_cycles;
2761 
2762 	systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL);
2763 	systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32;
2764 
2765 	return systime_cycles;
2766 }
2767 
2768 static uint64_t
2769 ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2770 {
2771 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2772 	uint64_t rx_tstamp_cycles;
2773 
2774 	/* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
2775 	rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL);
2776 	rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32;
2777 
2778 	return rx_tstamp_cycles;
2779 }
2780 
2781 static uint64_t
2782 ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
2783 {
2784 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2785 	uint64_t tx_tstamp_cycles;
2786 
2787 	/* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
2788 	tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL);
2789 	tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32;
2790 
2791 	return tx_tstamp_cycles;
2792 }
2793 
2794 static void
2795 ngbe_start_timecounters(struct rte_eth_dev *dev)
2796 {
2797 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2798 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2799 	uint32_t incval = 0;
2800 	uint32_t shift = 0;
2801 
2802 	incval = NGBE_INCVAL_1GB;
2803 	shift = NGBE_INCVAL_SHIFT_1GB;
2804 
2805 	wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval));
2806 
2807 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
2808 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2809 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2810 
2811 	adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2812 	adapter->systime_tc.cc_shift = shift;
2813 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
2814 
2815 	adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2816 	adapter->rx_tstamp_tc.cc_shift = shift;
2817 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2818 
2819 	adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
2820 	adapter->tx_tstamp_tc.cc_shift = shift;
2821 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2822 }
2823 
2824 static int
2825 ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
2826 {
2827 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2828 
2829 	adapter->systime_tc.nsec += delta;
2830 	adapter->rx_tstamp_tc.nsec += delta;
2831 	adapter->tx_tstamp_tc.nsec += delta;
2832 
2833 	return 0;
2834 }
2835 
2836 static int
2837 ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
2838 {
2839 	uint64_t ns;
2840 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2841 
2842 	ns = rte_timespec_to_ns(ts);
2843 	/* Set the timecounters to a new value. */
2844 	adapter->systime_tc.nsec = ns;
2845 	adapter->rx_tstamp_tc.nsec = ns;
2846 	adapter->tx_tstamp_tc.nsec = ns;
2847 
2848 	return 0;
2849 }
2850 
2851 static int
2852 ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
2853 {
2854 	uint64_t ns, systime_cycles;
2855 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2856 
2857 	systime_cycles = ngbe_read_systime_cyclecounter(dev);
2858 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
2859 	*ts = rte_ns_to_timespec(ns);
2860 
2861 	return 0;
2862 }
2863 
2864 static int
2865 ngbe_timesync_enable(struct rte_eth_dev *dev)
2866 {
2867 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2868 	uint32_t tsync_ctl;
2869 
2870 	/* Stop the timesync system time. */
2871 	wr32(hw, NGBE_TSTIMEINC, 0x0);
2872 	/* Reset the timesync system time value. */
2873 	wr32(hw, NGBE_TSTIMEL, 0x0);
2874 	wr32(hw, NGBE_TSTIMEH, 0x0);
2875 
2876 	ngbe_start_timecounters(dev);
2877 
2878 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2879 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588),
2880 		RTE_ETHER_TYPE_1588 | NGBE_ETFLT_ENA | NGBE_ETFLT_1588);
2881 
2882 	/* Enable timestamping of received PTP packets. */
2883 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2884 	tsync_ctl |= NGBE_TSRXCTL_ENA;
2885 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2886 
2887 	/* Enable timestamping of transmitted PTP packets. */
2888 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2889 	tsync_ctl |= NGBE_TSTXCTL_ENA;
2890 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2891 
2892 	ngbe_flush(hw);
2893 
2894 	return 0;
2895 }
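
/*
 * Illustrative usage sketch, not part of the driver: enabling timesync
 * from an application, reading the device clock and nudging it forward
 * by 1000 ns.
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	rte_eth_timesync_read_time(port_id, &ts);
 *	rte_eth_timesync_adjust_time(port_id, 1000);
 */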
2896 
2897 static int
2898 ngbe_timesync_disable(struct rte_eth_dev *dev)
2899 {
2900 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2901 	uint32_t tsync_ctl;
2902 
2903 	/* Disable timestamping of transmitted PTP packets. */
2904 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
2905 	tsync_ctl &= ~NGBE_TSTXCTL_ENA;
2906 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
2907 
2908 	/* Disable timestamping of received PTP packets. */
2909 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
2910 	tsync_ctl &= ~NGBE_TSRXCTL_ENA;
2911 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
2912 
2913 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
2914 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);
2915 
2916 	/* Stop incrementing the System Time registers. */
2917 	wr32(hw, NGBE_TSTIMEINC, 0);
2918 
2919 	return 0;
2920 }
2921 
2922 static int
2923 ngbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
2924 				 struct timespec *timestamp,
2925 				 uint32_t flags __rte_unused)
2926 {
2927 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2928 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2929 	uint32_t tsync_rxctl;
2930 	uint64_t rx_tstamp_cycles;
2931 	uint64_t ns;
2932 
2933 	tsync_rxctl = rd32(hw, NGBE_TSRXCTL);
2934 	if ((tsync_rxctl & NGBE_TSRXCTL_VLD) == 0)
2935 		return -EINVAL;
2936 
2937 	rx_tstamp_cycles = ngbe_read_rx_tstamp_cyclecounter(dev);
2938 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
2939 	*timestamp = rte_ns_to_timespec(ns);
2940 
2941 	return 0;
2942 }
2943 
2944 static int
2945 ngbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
2946 				 struct timespec *timestamp)
2947 {
2948 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2949 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
2950 	uint32_t tsync_txctl;
2951 	uint64_t tx_tstamp_cycles;
2952 	uint64_t ns;
2953 
2954 	tsync_txctl = rd32(hw, NGBE_TSTXCTL);
2955 	if ((tsync_txctl & NGBE_TSTXCTL_VLD) == 0)
2956 		return -EINVAL;
2957 
2958 	tx_tstamp_cycles = ngbe_read_tx_tstamp_cyclecounter(dev);
2959 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
2960 	*timestamp = rte_ns_to_timespec(ns);
2961 
2962 	return 0;
2963 }
2964 
2965 static int
2966 ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
2967 {
2968 	int count = 0;
2969 	int g_ind = 0;
2970 	const struct reg_info *reg_group;
2971 	const struct reg_info **reg_set = ngbe_regs_others;
2972 
2973 	while ((reg_group = reg_set[g_ind++]))
2974 		count += ngbe_regs_group_count(reg_group);
2975 
2976 	return count;
2977 }
2978 
2979 static int
2980 ngbe_get_regs(struct rte_eth_dev *dev,
2981 	      struct rte_dev_reg_info *regs)
2982 {
2983 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2984 	uint32_t *data = regs->data;
2985 	int g_ind = 0;
2986 	int count = 0;
2987 	const struct reg_info *reg_group;
2988 	const struct reg_info **reg_set = ngbe_regs_others;
2989 
2990 	if (data == NULL) {
2991 		regs->length = ngbe_get_reg_length(dev);
2992 		regs->width = sizeof(uint32_t);
2993 		return 0;
2994 	}
2995 
2996 	/* Support only full register dump */
2997 	if (regs->length == 0 ||
2998 	    regs->length == (uint32_t)ngbe_get_reg_length(dev)) {
2999 		regs->version = hw->mac.type << 24 |
3000 				hw->revision_id << 16 |
3001 				hw->device_id;
3002 		while ((reg_group = reg_set[g_ind++]))
3003 			count += ngbe_read_regs_group(dev, &data[count],
3004 						      reg_group);
3005 		return 0;
3006 	}
3007 
3008 	return -ENOTSUP;
3009 }
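
/*
 * Illustrative usage sketch, not part of the driver: the two-call
 * pattern above, first sizing the buffer (data == NULL), then dumping.
 *
 *	struct rte_dev_reg_info info = { 0 };
 *
 *	rte_eth_dev_get_reg_info(port_id, &info);
 *	info.data = calloc(info.length, info.width);
 *	rte_eth_dev_get_reg_info(port_id, &info);
 */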
3010 
3011 static int
3012 ngbe_get_eeprom_length(struct rte_eth_dev *dev)
3013 {
3014 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3015 
3016 	/* Return unit is byte count */
3017 	return hw->rom.word_size * 2;
3018 }
3019 
3020 static int
3021 ngbe_get_eeprom(struct rte_eth_dev *dev,
3022 		struct rte_dev_eeprom_info *in_eeprom)
3023 {
3024 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3025 	struct ngbe_rom_info *eeprom = &hw->rom;
3026 	uint16_t *data = in_eeprom->data;
3027 	int first, length;
3028 
3029 	first = in_eeprom->offset >> 1;
3030 	length = in_eeprom->length >> 1;
3031 	if (first > hw->rom.word_size ||
3032 	    ((first + length) > hw->rom.word_size))
3033 		return -EINVAL;
3034 
3035 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3036 
3037 	return eeprom->readw_buffer(hw, first, length, data);
3038 }
3039 
3040 static int
3041 ngbe_set_eeprom(struct rte_eth_dev *dev,
3042 		struct rte_dev_eeprom_info *in_eeprom)
3043 {
3044 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
3045 	struct ngbe_rom_info *eeprom = &hw->rom;
3046 	uint16_t *data = in_eeprom->data;
3047 	int first, length;
3048 
3049 	first = in_eeprom->offset >> 1;
3050 	length = in_eeprom->length >> 1;
3051 	if (first > hw->rom.word_size ||
3052 	    ((first + length) > hw->rom.word_size))
3053 		return -EINVAL;
3054 
3055 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3056 
3057 	return eeprom->writew_buffer(hw, first, length, data);
3058 }
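
/*
 * Illustrative usage sketch, not part of the driver: reading the first
 * four EEPROM words; offset and length are byte counts and are halved
 * into word units by the handlers above.
 *
 *	uint16_t buf[4];
 *	struct rte_dev_eeprom_info ee = { 0 };
 *
 *	ee.length = sizeof(buf);
 *	ee.data = buf;
 *	rte_eth_dev_get_eeprom(port_id, &ee);
 */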
3059 
3060 static const struct eth_dev_ops ngbe_eth_dev_ops = {
3061 	.dev_configure              = ngbe_dev_configure,
3062 	.dev_infos_get              = ngbe_dev_info_get,
3063 	.dev_start                  = ngbe_dev_start,
3064 	.dev_stop                   = ngbe_dev_stop,
3065 	.dev_set_link_up            = ngbe_dev_set_link_up,
3066 	.dev_set_link_down          = ngbe_dev_set_link_down,
3067 	.dev_close                  = ngbe_dev_close,
3068 	.dev_reset                  = ngbe_dev_reset,
3069 	.promiscuous_enable         = ngbe_dev_promiscuous_enable,
3070 	.promiscuous_disable        = ngbe_dev_promiscuous_disable,
3071 	.allmulticast_enable        = ngbe_dev_allmulticast_enable,
3072 	.allmulticast_disable       = ngbe_dev_allmulticast_disable,
3073 	.link_update                = ngbe_dev_link_update,
3074 	.stats_get                  = ngbe_dev_stats_get,
3075 	.xstats_get                 = ngbe_dev_xstats_get,
3076 	.xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
3077 	.stats_reset                = ngbe_dev_stats_reset,
3078 	.xstats_reset               = ngbe_dev_xstats_reset,
3079 	.xstats_get_names           = ngbe_dev_xstats_get_names,
3080 	.xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
3081 	.fw_version_get             = ngbe_fw_version_get,
3082 	.dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
3083 	.mtu_set                    = ngbe_dev_mtu_set,
3084 	.vlan_filter_set            = ngbe_vlan_filter_set,
3085 	.vlan_tpid_set              = ngbe_vlan_tpid_set,
3086 	.vlan_offload_set           = ngbe_vlan_offload_set,
3087 	.vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
3088 	.rx_queue_start	            = ngbe_dev_rx_queue_start,
3089 	.rx_queue_stop              = ngbe_dev_rx_queue_stop,
3090 	.tx_queue_start	            = ngbe_dev_tx_queue_start,
3091 	.tx_queue_stop              = ngbe_dev_tx_queue_stop,
3092 	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
3093 	.rx_queue_release           = ngbe_dev_rx_queue_release,
3094 	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
3095 	.tx_queue_release           = ngbe_dev_tx_queue_release,
3096 	.dev_led_on                 = ngbe_dev_led_on,
3097 	.dev_led_off                = ngbe_dev_led_off,
3098 	.flow_ctrl_get              = ngbe_flow_ctrl_get,
3099 	.flow_ctrl_set              = ngbe_flow_ctrl_set,
3100 	.mac_addr_add               = ngbe_add_rar,
3101 	.mac_addr_remove            = ngbe_remove_rar,
3102 	.mac_addr_set               = ngbe_set_default_mac_addr,
3103 	.uc_hash_table_set          = ngbe_uc_hash_table_set,
3104 	.uc_all_hash_table_set      = ngbe_uc_all_hash_table_set,
3105 	.reta_update                = ngbe_dev_rss_reta_update,
3106 	.reta_query                 = ngbe_dev_rss_reta_query,
3107 	.rss_hash_update            = ngbe_dev_rss_hash_update,
3108 	.rss_hash_conf_get          = ngbe_dev_rss_hash_conf_get,
3109 	.set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
3110 	.rxq_info_get               = ngbe_rxq_info_get,
3111 	.txq_info_get               = ngbe_txq_info_get,
3112 	.rx_burst_mode_get          = ngbe_rx_burst_mode_get,
3113 	.tx_burst_mode_get          = ngbe_tx_burst_mode_get,
3114 	.timesync_enable            = ngbe_timesync_enable,
3115 	.timesync_disable           = ngbe_timesync_disable,
3116 	.timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp,
3117 	.timesync_read_tx_timestamp = ngbe_timesync_read_tx_timestamp,
3118 	.get_reg                    = ngbe_get_regs,
3119 	.get_eeprom_length          = ngbe_get_eeprom_length,
3120 	.get_eeprom                 = ngbe_get_eeprom,
3121 	.set_eeprom                 = ngbe_set_eeprom,
3122 	.timesync_adjust_time       = ngbe_timesync_adjust_time,
3123 	.timesync_read_time         = ngbe_timesync_read_time,
3124 	.timesync_write_time        = ngbe_timesync_write_time,
3125 	.tx_done_cleanup            = ngbe_dev_tx_done_cleanup,
3126 };
3127 
3128 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
3129 RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
3130 RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
3131 
3132 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
3133 RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
3134 
3135 #ifdef RTE_ETHDEV_DEBUG_RX
3136 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
3137 #endif
3138 #ifdef RTE_ETHDEV_DEBUG_TX
3139 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
3140 #endif
3141