xref: /dpdk/drivers/net/ngbe/ngbe_ethdev.c (revision baca8ec066dc6fdc42374e8eafd67eecfd6c9267)
126590b52SJiawen Wu /* SPDX-License-Identifier: BSD-3-Clause
226590b52SJiawen Wu  * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
326590b52SJiawen Wu  * Copyright(c) 2010-2017 Intel Corporation
426590b52SJiawen Wu  */
526590b52SJiawen Wu 
626590b52SJiawen Wu #include <errno.h>
726590b52SJiawen Wu #include <rte_common.h>
826590b52SJiawen Wu #include <ethdev_pci.h>
926590b52SJiawen Wu 
10b9246b8fSJiawen Wu #include <rte_alarm.h>
11b9246b8fSJiawen Wu 
12cc934df1SJiawen Wu #include "ngbe_logs.h"
1368eb13a1SJiawen Wu #include "ngbe.h"
1468eb13a1SJiawen Wu #include "ngbe_ethdev.h"
1543b7e5eaSJiawen Wu #include "ngbe_rxtx.h"
1671aec127SJiawen Wu #include "ngbe_regs_group.h"
1771aec127SJiawen Wu 
1871aec127SJiawen Wu static const struct reg_info ngbe_regs_general[] = {
1971aec127SJiawen Wu 	{NGBE_RST, 1, 1, "NGBE_RST"},
2071aec127SJiawen Wu 	{NGBE_STAT, 1, 1, "NGBE_STAT"},
2171aec127SJiawen Wu 	{NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
2271aec127SJiawen Wu 	{NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
2371aec127SJiawen Wu 	{NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
2471aec127SJiawen Wu 	{NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
2571aec127SJiawen Wu 	{0, 0, 0, ""}
2671aec127SJiawen Wu };
2771aec127SJiawen Wu 
2871aec127SJiawen Wu static const struct reg_info ngbe_regs_nvm[] = {
2971aec127SJiawen Wu 	{0, 0, 0, ""}
3071aec127SJiawen Wu };
3171aec127SJiawen Wu 
3271aec127SJiawen Wu static const struct reg_info ngbe_regs_interrupt[] = {
3371aec127SJiawen Wu 	{0, 0, 0, ""}
3471aec127SJiawen Wu };
3571aec127SJiawen Wu 
3671aec127SJiawen Wu static const struct reg_info ngbe_regs_fctl_others[] = {
3771aec127SJiawen Wu 	{0, 0, 0, ""}
3871aec127SJiawen Wu };
3971aec127SJiawen Wu 
4071aec127SJiawen Wu static const struct reg_info ngbe_regs_rxdma[] = {
4171aec127SJiawen Wu 	{0, 0, 0, ""}
4271aec127SJiawen Wu };
4371aec127SJiawen Wu 
4471aec127SJiawen Wu static const struct reg_info ngbe_regs_rx[] = {
4571aec127SJiawen Wu 	{0, 0, 0, ""}
4671aec127SJiawen Wu };
4771aec127SJiawen Wu 
4871aec127SJiawen Wu static const struct reg_info ngbe_regs_tx[] = {
4971aec127SJiawen Wu 	{0, 0, 0, ""}
5071aec127SJiawen Wu };
5171aec127SJiawen Wu 
5271aec127SJiawen Wu static const struct reg_info ngbe_regs_wakeup[] = {
5371aec127SJiawen Wu 	{0, 0, 0, ""}
5471aec127SJiawen Wu };
5571aec127SJiawen Wu 
5671aec127SJiawen Wu static const struct reg_info ngbe_regs_mac[] = {
5771aec127SJiawen Wu 	{0, 0, 0, ""}
5871aec127SJiawen Wu };
5971aec127SJiawen Wu 
6071aec127SJiawen Wu static const struct reg_info ngbe_regs_diagnostic[] = {
6171aec127SJiawen Wu 	{0, 0, 0, ""},
6271aec127SJiawen Wu };
6371aec127SJiawen Wu 
6471aec127SJiawen Wu /* PF registers */
6571aec127SJiawen Wu static const struct reg_info *ngbe_regs_others[] = {
6671aec127SJiawen Wu 				ngbe_regs_general,
6771aec127SJiawen Wu 				ngbe_regs_nvm,
6871aec127SJiawen Wu 				ngbe_regs_interrupt,
6971aec127SJiawen Wu 				ngbe_regs_fctl_others,
7071aec127SJiawen Wu 				ngbe_regs_rxdma,
7171aec127SJiawen Wu 				ngbe_regs_rx,
7271aec127SJiawen Wu 				ngbe_regs_tx,
7371aec127SJiawen Wu 				ngbe_regs_wakeup,
7471aec127SJiawen Wu 				ngbe_regs_mac,
7571aec127SJiawen Wu 				ngbe_regs_diagnostic,
7671aec127SJiawen Wu 				NULL};
7768eb13a1SJiawen Wu 
7868eb13a1SJiawen Wu static int ngbe_dev_close(struct rte_eth_dev *dev);
793518df57SJiawen Wu static int ngbe_dev_link_update(struct rte_eth_dev *dev,
803518df57SJiawen Wu 				int wait_to_complete);
81fdb1e851SJiawen Wu static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
8259b46438SJiawen Wu static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
8359b46438SJiawen Wu static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
8459b46438SJiawen Wu 					uint16_t queue);
856ee7e574SJiawen Wu 
863518df57SJiawen Wu static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
873518df57SJiawen Wu static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
883518df57SJiawen Wu static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
893518df57SJiawen Wu static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
903518df57SJiawen Wu static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
91b9246b8fSJiawen Wu static void ngbe_dev_interrupt_handler(void *param);
923518df57SJiawen Wu static void ngbe_configure_msix(struct rte_eth_dev *dev);
93d19fa5a1SJiawen Wu static void ngbe_pbthresh_set(struct rte_eth_dev *dev);
94b9246b8fSJiawen Wu 
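/*
 * Per-queue VLAN strip state is tracked in a bitmap: each queue maps to one
 * bit, with (sizeof((h)->bitmap[0]) * NBBY) bits held in each bitmap word.
 */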
9559b46438SJiawen Wu #define NGBE_SET_HWSTRIP(h, q) do {\
9659b46438SJiawen Wu 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
9759b46438SJiawen Wu 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
9859b46438SJiawen Wu 		(h)->bitmap[idx] |= 1 << bit;\
9959b46438SJiawen Wu 	} while (0)
10059b46438SJiawen Wu 
10159b46438SJiawen Wu #define NGBE_CLEAR_HWSTRIP(h, q) do {\
10259b46438SJiawen Wu 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
10359b46438SJiawen Wu 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
10459b46438SJiawen Wu 		(h)->bitmap[idx] &= ~(1 << bit);\
10559b46438SJiawen Wu 	} while (0)
10659b46438SJiawen Wu 
10759b46438SJiawen Wu #define NGBE_GET_HWSTRIP(h, q, r) do {\
10859b46438SJiawen Wu 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
10959b46438SJiawen Wu 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
11059b46438SJiawen Wu 		(r) = (h)->bitmap[idx] >> bit & 1;\
11159b46438SJiawen Wu 	} while (0)
11259b46438SJiawen Wu 
1136ee7e574SJiawen Wu /*
1146ee7e574SJiawen Wu  * The set of PCI devices this driver supports
1156ee7e574SJiawen Wu  */
1166ee7e574SJiawen Wu static const struct rte_pci_id pci_id_ngbe_map[] = {
1176ee7e574SJiawen Wu 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
1186ee7e574SJiawen Wu 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
1196ee7e574SJiawen Wu 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
1206ee7e574SJiawen Wu 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
1216ee7e574SJiawen Wu 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
1226ee7e574SJiawen Wu 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
1236ee7e574SJiawen Wu 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
1246ee7e574SJiawen Wu 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
1256ee7e574SJiawen Wu 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
1266ee7e574SJiawen Wu 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
1276ee7e574SJiawen Wu 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
1286ee7e574SJiawen Wu 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
1296ee7e574SJiawen Wu 	{ .vendor_id = 0, /* sentinel */ },
1306ee7e574SJiawen Wu };
1316ee7e574SJiawen Wu 
13243b7e5eaSJiawen Wu static const struct rte_eth_desc_lim rx_desc_lim = {
13343b7e5eaSJiawen Wu 	.nb_max = NGBE_RING_DESC_MAX,
13443b7e5eaSJiawen Wu 	.nb_min = NGBE_RING_DESC_MIN,
13543b7e5eaSJiawen Wu 	.nb_align = NGBE_RXD_ALIGN,
13643b7e5eaSJiawen Wu };
13743b7e5eaSJiawen Wu 
138a58e7c31SJiawen Wu static const struct rte_eth_desc_lim tx_desc_lim = {
139a58e7c31SJiawen Wu 	.nb_max = NGBE_RING_DESC_MAX,
140a58e7c31SJiawen Wu 	.nb_min = NGBE_RING_DESC_MIN,
141a58e7c31SJiawen Wu 	.nb_align = NGBE_TXD_ALIGN,
142a58e7c31SJiawen Wu 	.nb_seg_max = NGBE_TX_MAX_SEG,
143a58e7c31SJiawen Wu 	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
144a58e7c31SJiawen Wu };
145a58e7c31SJiawen Wu 
146b9246b8fSJiawen Wu static const struct eth_dev_ops ngbe_eth_dev_ops;
147b9246b8fSJiawen Wu 
1488b433d04SJiawen Wu #define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
1498b433d04SJiawen Wu #define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
1508b433d04SJiawen Wu static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
1518b433d04SJiawen Wu 	/* MNG RxTx */
1528b433d04SJiawen Wu 	HW_XSTAT(mng_bmc2host_packets),
1538b433d04SJiawen Wu 	HW_XSTAT(mng_host2bmc_packets),
1548b433d04SJiawen Wu 	/* Basic RxTx */
1558b433d04SJiawen Wu 	HW_XSTAT(rx_packets),
1568b433d04SJiawen Wu 	HW_XSTAT(tx_packets),
1578b433d04SJiawen Wu 	HW_XSTAT(rx_bytes),
1588b433d04SJiawen Wu 	HW_XSTAT(tx_bytes),
1598b433d04SJiawen Wu 	HW_XSTAT(rx_total_bytes),
1608b433d04SJiawen Wu 	HW_XSTAT(rx_total_packets),
1618b433d04SJiawen Wu 	HW_XSTAT(tx_total_packets),
1628b433d04SJiawen Wu 	HW_XSTAT(rx_total_missed_packets),
1638b433d04SJiawen Wu 	HW_XSTAT(rx_broadcast_packets),
1649e2d2fadSJiawen Wu 	HW_XSTAT(tx_broadcast_packets),
1658b433d04SJiawen Wu 	HW_XSTAT(rx_multicast_packets),
1669e2d2fadSJiawen Wu 	HW_XSTAT(tx_multicast_packets),
1678b433d04SJiawen Wu 	HW_XSTAT(rx_management_packets),
1688b433d04SJiawen Wu 	HW_XSTAT(tx_management_packets),
1698b433d04SJiawen Wu 	HW_XSTAT(rx_management_dropped),
17051d36568SJiawen Wu 	HW_XSTAT(rx_dma_drop),
171fee9350fSJiawen Wu 	HW_XSTAT(tx_dma_drop),
17251d36568SJiawen Wu 	HW_XSTAT(tx_secdrp_packets),
1738b433d04SJiawen Wu 
1748b433d04SJiawen Wu 	/* Basic Error */
1758b433d04SJiawen Wu 	HW_XSTAT(rx_crc_errors),
1768b433d04SJiawen Wu 	HW_XSTAT(rx_illegal_byte_errors),
1778b433d04SJiawen Wu 	HW_XSTAT(rx_error_bytes),
1788b433d04SJiawen Wu 	HW_XSTAT(rx_mac_short_packet_dropped),
1798b433d04SJiawen Wu 	HW_XSTAT(rx_length_errors),
1808b433d04SJiawen Wu 	HW_XSTAT(rx_undersize_errors),
1818b433d04SJiawen Wu 	HW_XSTAT(rx_fragment_errors),
182fee9350fSJiawen Wu 	HW_XSTAT(rx_oversize_cnt),
1838b433d04SJiawen Wu 	HW_XSTAT(rx_jabber_errors),
1848b433d04SJiawen Wu 	HW_XSTAT(rx_l3_l4_xsum_error),
1858b433d04SJiawen Wu 	HW_XSTAT(mac_local_errors),
1868b433d04SJiawen Wu 	HW_XSTAT(mac_remote_errors),
1878b433d04SJiawen Wu 
18851d36568SJiawen Wu 	/* PB Stats */
18951d36568SJiawen Wu 	HW_XSTAT(rx_up_dropped),
19051d36568SJiawen Wu 	HW_XSTAT(rdb_pkt_cnt),
19151d36568SJiawen Wu 	HW_XSTAT(rdb_repli_cnt),
19251d36568SJiawen Wu 	HW_XSTAT(rdb_drp_cnt),
19351d36568SJiawen Wu 
1948b433d04SJiawen Wu 	/* MACSEC */
1958b433d04SJiawen Wu 	HW_XSTAT(tx_macsec_pkts_untagged),
1968b433d04SJiawen Wu 	HW_XSTAT(tx_macsec_pkts_encrypted),
1978b433d04SJiawen Wu 	HW_XSTAT(tx_macsec_pkts_protected),
1988b433d04SJiawen Wu 	HW_XSTAT(tx_macsec_octets_encrypted),
1998b433d04SJiawen Wu 	HW_XSTAT(tx_macsec_octets_protected),
2008b433d04SJiawen Wu 	HW_XSTAT(rx_macsec_pkts_untagged),
2018b433d04SJiawen Wu 	HW_XSTAT(rx_macsec_pkts_badtag),
2028b433d04SJiawen Wu 	HW_XSTAT(rx_macsec_pkts_nosci),
2038b433d04SJiawen Wu 	HW_XSTAT(rx_macsec_pkts_unknownsci),
2048b433d04SJiawen Wu 	HW_XSTAT(rx_macsec_octets_decrypted),
2058b433d04SJiawen Wu 	HW_XSTAT(rx_macsec_octets_validated),
2068b433d04SJiawen Wu 	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
2078b433d04SJiawen Wu 	HW_XSTAT(rx_macsec_sc_pkts_delayed),
2088b433d04SJiawen Wu 	HW_XSTAT(rx_macsec_sc_pkts_late),
2098b433d04SJiawen Wu 	HW_XSTAT(rx_macsec_sa_pkts_ok),
2108b433d04SJiawen Wu 	HW_XSTAT(rx_macsec_sa_pkts_invalid),
2118b433d04SJiawen Wu 	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
2128b433d04SJiawen Wu 	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
2138b433d04SJiawen Wu 	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
2148b433d04SJiawen Wu 
2158b433d04SJiawen Wu 	/* MAC RxTx */
2168b433d04SJiawen Wu 	HW_XSTAT(rx_size_64_packets),
2178b433d04SJiawen Wu 	HW_XSTAT(rx_size_65_to_127_packets),
2188b433d04SJiawen Wu 	HW_XSTAT(rx_size_128_to_255_packets),
2198b433d04SJiawen Wu 	HW_XSTAT(rx_size_256_to_511_packets),
2208b433d04SJiawen Wu 	HW_XSTAT(rx_size_512_to_1023_packets),
2218b433d04SJiawen Wu 	HW_XSTAT(rx_size_1024_to_max_packets),
2228b433d04SJiawen Wu 	HW_XSTAT(tx_size_64_packets),
2238b433d04SJiawen Wu 	HW_XSTAT(tx_size_65_to_127_packets),
2248b433d04SJiawen Wu 	HW_XSTAT(tx_size_128_to_255_packets),
2258b433d04SJiawen Wu 	HW_XSTAT(tx_size_256_to_511_packets),
2268b433d04SJiawen Wu 	HW_XSTAT(tx_size_512_to_1023_packets),
2278b433d04SJiawen Wu 	HW_XSTAT(tx_size_1024_to_max_packets),
2288b433d04SJiawen Wu 
2298b433d04SJiawen Wu 	/* Flow Control */
2308b433d04SJiawen Wu 	HW_XSTAT(tx_xon_packets),
2318b433d04SJiawen Wu 	HW_XSTAT(rx_xon_packets),
2328b433d04SJiawen Wu 	HW_XSTAT(tx_xoff_packets),
2338b433d04SJiawen Wu 	HW_XSTAT(rx_xoff_packets),
2348b433d04SJiawen Wu 
2358b433d04SJiawen Wu 	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
2368b433d04SJiawen Wu 	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
2378b433d04SJiawen Wu 	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
2388b433d04SJiawen Wu 	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
2398b433d04SJiawen Wu };
2408b433d04SJiawen Wu 
2418b433d04SJiawen Wu #define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
2428b433d04SJiawen Wu 			   sizeof(rte_ngbe_stats_strings[0]))
2438b433d04SJiawen Wu 
2448b433d04SJiawen Wu /* Per-queue statistics */
2458b433d04SJiawen Wu #define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
2468b433d04SJiawen Wu static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
2478b433d04SJiawen Wu 	QP_XSTAT(rx_qp_packets),
2488b433d04SJiawen Wu 	QP_XSTAT(tx_qp_packets),
2498b433d04SJiawen Wu 	QP_XSTAT(rx_qp_bytes),
2508b433d04SJiawen Wu 	QP_XSTAT(tx_qp_bytes),
2518b433d04SJiawen Wu 	QP_XSTAT(rx_qp_mc_packets),
2528b433d04SJiawen Wu };
2538b433d04SJiawen Wu 
2548b433d04SJiawen Wu #define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
2558b433d04SJiawen Wu 			   sizeof(rte_ngbe_qp_strings[0]))
2568b433d04SJiawen Wu 
2573518df57SJiawen Wu static inline int32_t
2583518df57SJiawen Wu ngbe_pf_reset_hw(struct ngbe_hw *hw)
2593518df57SJiawen Wu {
2603518df57SJiawen Wu 	uint32_t ctrl_ext;
2613518df57SJiawen Wu 	int32_t status;
2623518df57SJiawen Wu 
2633518df57SJiawen Wu 	status = hw->mac.reset_hw(hw);
2643518df57SJiawen Wu 
2653518df57SJiawen Wu 	ctrl_ext = rd32(hw, NGBE_PORTCTL);
266cb7be5b5SJiawen Wu 	/* let hardware know driver is loaded */
267cb7be5b5SJiawen Wu 	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
2683518df57SJiawen Wu 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
2693518df57SJiawen Wu 	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
2703518df57SJiawen Wu 	wr32(hw, NGBE_PORTCTL, ctrl_ext);
2713518df57SJiawen Wu 	ngbe_flush(hw);
2723518df57SJiawen Wu 
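	/* A missing SFP module is not treated as a reset failure */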
2733518df57SJiawen Wu 	if (status == NGBE_ERR_SFP_NOT_PRESENT)
2743518df57SJiawen Wu 		status = 0;
2753518df57SJiawen Wu 	return status;
2763518df57SJiawen Wu }
2773518df57SJiawen Wu 
278b9246b8fSJiawen Wu static inline void
279b9246b8fSJiawen Wu ngbe_enable_intr(struct rte_eth_dev *dev)
280b9246b8fSJiawen Wu {
281b9246b8fSJiawen Wu 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
282b9246b8fSJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
283b9246b8fSJiawen Wu 
284b9246b8fSJiawen Wu 	wr32(hw, NGBE_IENMISC, intr->mask_misc);
285b9246b8fSJiawen Wu 	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
286b9246b8fSJiawen Wu 	ngbe_flush(hw);
287b9246b8fSJiawen Wu }
288b9246b8fSJiawen Wu 
289b9246b8fSJiawen Wu static void
290b9246b8fSJiawen Wu ngbe_disable_intr(struct ngbe_hw *hw)
291b9246b8fSJiawen Wu {
292b9246b8fSJiawen Wu 	PMD_INIT_FUNC_TRACE();
293b9246b8fSJiawen Wu 
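	/* Setting every bit in the interrupt mask set register masks all vectors */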
294b9246b8fSJiawen Wu 	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
295b9246b8fSJiawen Wu 	ngbe_flush(hw);
296b9246b8fSJiawen Wu }
297b9246b8fSJiawen Wu 
298f501a195SJiawen Wu /*
299f501a195SJiawen Wu  * Ensure that all locks are released before first NVM or PHY access
300f501a195SJiawen Wu  */
301f501a195SJiawen Wu static void
302f501a195SJiawen Wu ngbe_swfw_lock_reset(struct ngbe_hw *hw)
303f501a195SJiawen Wu {
304f501a195SJiawen Wu 	uint16_t mask;
305f501a195SJiawen Wu 
306f501a195SJiawen Wu 	/*
307f501a195SJiawen Wu 	 * These locks are trickier since they are shared by all ports; but
308f501a195SJiawen Wu 	 * swfw_sync retries for long enough (1 s) that, if the lock cannot
309f501a195SJiawen Wu 	 * be taken, it is almost certainly because the semaphore was left
310f501a195SJiawen Wu 	 * improperly held.
311f501a195SJiawen Wu 	 */
312f501a195SJiawen Wu 	mask = NGBE_MNGSEM_SWPHY |
313f501a195SJiawen Wu 	       NGBE_MNGSEM_SWMBX |
314f501a195SJiawen Wu 	       NGBE_MNGSEM_SWFLASH;
315f501a195SJiawen Wu 	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
316f501a195SJiawen Wu 		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
317f501a195SJiawen Wu 
318f501a195SJiawen Wu 	hw->mac.release_swfw_sync(hw, mask);
319f501a195SJiawen Wu }
320f501a195SJiawen Wu 
32126590b52SJiawen Wu static int
3226ee7e574SJiawen Wu eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
3236ee7e574SJiawen Wu {
3246ee7e574SJiawen Wu 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
32568eb13a1SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
32659b46438SJiawen Wu 	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
32759b46438SJiawen Wu 	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
328d61138d4SHarman Kalra 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
32978710873SJiawen Wu 	const struct rte_memzone *mz;
330b9246b8fSJiawen Wu 	uint32_t ctrl_ext;
331fbd5ceb0SJiawen Wu 	u32 led_conf = 0;
33260229dcfSJiawen Wu 	int err, ret;
3336ee7e574SJiawen Wu 
334cc934df1SJiawen Wu 	PMD_INIT_FUNC_TRACE();
335cc934df1SJiawen Wu 
336b9246b8fSJiawen Wu 	eth_dev->dev_ops = &ngbe_eth_dev_ops;
337b7aad633SJiawen Wu 	eth_dev->rx_queue_count       = ngbe_dev_rx_queue_count;
338b7aad633SJiawen Wu 	eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
339b7aad633SJiawen Wu 	eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
34093dfebd2SJiawen Wu 	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
3419f320614SJiawen Wu 	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
3429f320614SJiawen Wu 	eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;
343b9246b8fSJiawen Wu 
34479f3128dSJiawen Wu 	/*
34579f3128dSJiawen Wu 	 * For secondary processes, we don't initialise any further as primary
34679f3128dSJiawen Wu 	 * has already done this work. Only check we don't need a different
34779f3128dSJiawen Wu 	 * Rx and Tx function.
34879f3128dSJiawen Wu 	 */
34979f3128dSJiawen Wu 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3509f320614SJiawen Wu 		struct ngbe_tx_queue *txq;
3519f320614SJiawen Wu 		/* The Tx function in the primary process was set by the last queue
3529f320614SJiawen Wu 		 * initialized; Tx queues may not have been initialized by the primary process.
3539f320614SJiawen Wu 		 */
3549f320614SJiawen Wu 		if (eth_dev->data->tx_queues) {
3559f320614SJiawen Wu 			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
3569f320614SJiawen Wu 			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
3579f320614SJiawen Wu 			ngbe_set_tx_function(eth_dev, txq);
3589f320614SJiawen Wu 		} else {
3599f320614SJiawen Wu 			/* Use default Tx function if we get here */
3609f320614SJiawen Wu 			PMD_INIT_LOG(NOTICE,
3619f320614SJiawen Wu 				"No Tx queues configured yet. Using default Tx function.");
3629f320614SJiawen Wu 		}
3639f320614SJiawen Wu 
36479f3128dSJiawen Wu 		ngbe_set_rx_function(eth_dev);
36579f3128dSJiawen Wu 
3666ee7e574SJiawen Wu 		return 0;
36779f3128dSJiawen Wu 	}
3686ee7e574SJiawen Wu 
3696ee7e574SJiawen Wu 	rte_eth_copy_pci_info(eth_dev, pci_dev);
370fdb1e851SJiawen Wu 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
3716ee7e574SJiawen Wu 
3729fa23029SJiawen Wu 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
3739fa23029SJiawen Wu 
37468eb13a1SJiawen Wu 	/* Vendor and Device ID need to be set before init of shared code */
3750aeb133cSJiawen Wu 	hw->back = pci_dev;
37691e64c0eSJiawen Wu 	hw->port_id = eth_dev->data->port_id;
37768eb13a1SJiawen Wu 	hw->device_id = pci_dev->id.device_id;
37868eb13a1SJiawen Wu 	hw->vendor_id = pci_dev->id.vendor_id;
379240422edSJiawen Wu 	if (pci_dev->id.subsystem_vendor_id == PCI_VENDOR_ID_WANGXUN) {
38068eb13a1SJiawen Wu 		hw->sub_system_id = pci_dev->id.subsystem_device_id;
381240422edSJiawen Wu 	} else {
38291e64c0eSJiawen Wu 		u32 ssid = 0;
383240422edSJiawen Wu 
38491e64c0eSJiawen Wu 		err = ngbe_flash_read_dword(hw, 0xFFFDC, &ssid);
38591e64c0eSJiawen Wu 		if (err) {
386240422edSJiawen Wu 			PMD_INIT_LOG(ERR,
387f665790aSDavid Marchand 				"Read of internal subsystem device id failed");
388240422edSJiawen Wu 			return -ENODEV;
389240422edSJiawen Wu 		}
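		/* Byte-swap the low 16 bits of the flash word to get the subsystem ID */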
390240422edSJiawen Wu 		hw->sub_system_id = (u16)ssid >> 8 | (u16)ssid << 8;
391240422edSJiawen Wu 	}
39268eb13a1SJiawen Wu 	ngbe_map_device_id(hw);
39368eb13a1SJiawen Wu 
39478710873SJiawen Wu 	/* Reserve memory for interrupt status block */
39578710873SJiawen Wu 	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
39678710873SJiawen Wu 		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
39778710873SJiawen Wu 	if (mz == NULL)
39878710873SJiawen Wu 		return -ENOMEM;
39978710873SJiawen Wu 
40078710873SJiawen Wu 	hw->isb_dma = TMZ_PADDR(mz);
40178710873SJiawen Wu 	hw->isb_mem = TMZ_VADDR(mz);
40278710873SJiawen Wu 
40368eb13a1SJiawen Wu 	/* Initialize the shared code (base driver) */
40468eb13a1SJiawen Wu 	err = ngbe_init_shared_code(hw);
40568eb13a1SJiawen Wu 	if (err != 0) {
40668eb13a1SJiawen Wu 		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
40768eb13a1SJiawen Wu 		return -EIO;
40868eb13a1SJiawen Wu 	}
40968eb13a1SJiawen Wu 
410f501a195SJiawen Wu 	/* Unlock any pending hardware semaphore */
411f501a195SJiawen Wu 	ngbe_swfw_lock_reset(hw);
4125f1ab0d5SJiawen Wu 	ngbe_set_ncsi_status(hw);
413f501a195SJiawen Wu 
414f40e9f0eSJiawen Wu 	/* Initialize default hardware flow control settings */
415f40e9f0eSJiawen Wu 	hw->fc.requested_mode = ngbe_fc_full;
416f40e9f0eSJiawen Wu 	hw->fc.current_mode = ngbe_fc_full;
417f40e9f0eSJiawen Wu 	hw->fc.pause_time = NGBE_FC_PAUSE_TIME;
418f40e9f0eSJiawen Wu 	hw->fc.low_water = NGBE_FC_XON_LOTH;
419f40e9f0eSJiawen Wu 	hw->fc.high_water = NGBE_FC_XOFF_HITH;
420f40e9f0eSJiawen Wu 	hw->fc.send_xon = 1;
421f40e9f0eSJiawen Wu 
422f501a195SJiawen Wu 	err = hw->rom.init_params(hw);
423f501a195SJiawen Wu 	if (err != 0) {
424f501a195SJiawen Wu 		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
425f501a195SJiawen Wu 		return -EIO;
426f501a195SJiawen Wu 	}
427f501a195SJiawen Wu 
428f501a195SJiawen Wu 	/* Make sure we have a good EEPROM before we read from it */
429f501a195SJiawen Wu 	err = hw->rom.validate_checksum(hw, NULL);
430f501a195SJiawen Wu 	if (err != 0) {
431f501a195SJiawen Wu 		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
432f501a195SJiawen Wu 		return -EIO;
433f501a195SJiawen Wu 	}
434f501a195SJiawen Wu 
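	/* Use the OEM LED configuration if the PHY reports one, else fall back to 0xFFFF */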
435fbd5ceb0SJiawen Wu 	err = hw->phy.led_oem_chk(hw, &led_conf);
436fbd5ceb0SJiawen Wu 	if (err == 0)
437fbd5ceb0SJiawen Wu 		hw->led_conf = led_conf;
438fbd5ceb0SJiawen Wu 	else
439fbd5ceb0SJiawen Wu 		hw->led_conf = 0xFFFF;
440fbd5ceb0SJiawen Wu 
44178710873SJiawen Wu 	err = hw->mac.init_hw(hw);
44278710873SJiawen Wu 	if (err != 0) {
44378710873SJiawen Wu 		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
44478710873SJiawen Wu 		return -EIO;
44578710873SJiawen Wu 	}
44678710873SJiawen Wu 
447fdb1e851SJiawen Wu 	/* Reset the hw statistics */
448fdb1e851SJiawen Wu 	ngbe_dev_stats_reset(eth_dev);
449fdb1e851SJiawen Wu 
450b9246b8fSJiawen Wu 	/* disable interrupt */
451b9246b8fSJiawen Wu 	ngbe_disable_intr(hw);
452b9246b8fSJiawen Wu 
453539d55daSJiawen Wu 	/* Allocate memory for storing MAC addresses */
454539d55daSJiawen Wu 	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
455539d55daSJiawen Wu 					       hw->mac.num_rar_entries, 0);
456539d55daSJiawen Wu 	if (eth_dev->data->mac_addrs == NULL) {
457539d55daSJiawen Wu 		PMD_INIT_LOG(ERR,
458539d55daSJiawen Wu 			     "Failed to allocate %u bytes needed to store MAC addresses",
459539d55daSJiawen Wu 			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
460539d55daSJiawen Wu 		return -ENOMEM;
461539d55daSJiawen Wu 	}
462539d55daSJiawen Wu 
463539d55daSJiawen Wu 	/* Copy the permanent MAC address */
464539d55daSJiawen Wu 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
465539d55daSJiawen Wu 			&eth_dev->data->mac_addrs[0]);
466539d55daSJiawen Wu 
467539d55daSJiawen Wu 	/* Allocate memory for storing hash filter MAC addresses */
468539d55daSJiawen Wu 	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
469539d55daSJiawen Wu 			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
470539d55daSJiawen Wu 	if (eth_dev->data->hash_mac_addrs == NULL) {
471539d55daSJiawen Wu 		PMD_INIT_LOG(ERR,
472539d55daSJiawen Wu 			     "Failed to allocate %d bytes needed to store MAC addresses",
473539d55daSJiawen Wu 			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
474539d55daSJiawen Wu 		rte_free(eth_dev->data->mac_addrs);
475539d55daSJiawen Wu 		eth_dev->data->mac_addrs = NULL;
476539d55daSJiawen Wu 		return -ENOMEM;
477539d55daSJiawen Wu 	}
478539d55daSJiawen Wu 
47959b46438SJiawen Wu 	/* initialize the vfta */
48059b46438SJiawen Wu 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
48159b46438SJiawen Wu 
48259b46438SJiawen Wu 	/* initialize the HW strip bitmap */
48359b46438SJiawen Wu 	memset(hwstrip, 0, sizeof(*hwstrip));
48459b46438SJiawen Wu 
48560229dcfSJiawen Wu 	/* initialize the PF host if max_vfs is not zero */
48660229dcfSJiawen Wu 	ret = ngbe_pf_host_init(eth_dev);
48760229dcfSJiawen Wu 	if (ret) {
48860229dcfSJiawen Wu 		rte_free(eth_dev->data->mac_addrs);
48960229dcfSJiawen Wu 		eth_dev->data->mac_addrs = NULL;
49060229dcfSJiawen Wu 		rte_free(eth_dev->data->hash_mac_addrs);
49160229dcfSJiawen Wu 		eth_dev->data->hash_mac_addrs = NULL;
49260229dcfSJiawen Wu 		return ret;
49360229dcfSJiawen Wu 	}
49460229dcfSJiawen Wu 
495b9246b8fSJiawen Wu 	ctrl_ext = rd32(hw, NGBE_PORTCTL);
496b9246b8fSJiawen Wu 	/* let hardware know driver is loaded */
497b9246b8fSJiawen Wu 	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
498b9246b8fSJiawen Wu 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
499b9246b8fSJiawen Wu 	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
500b9246b8fSJiawen Wu 	wr32(hw, NGBE_PORTCTL, ctrl_ext);
501b9246b8fSJiawen Wu 	ngbe_flush(hw);
502b9246b8fSJiawen Wu 
5033518df57SJiawen Wu 	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
5043518df57SJiawen Wu 			(int)hw->mac.type, (int)hw->phy.type);
5053518df57SJiawen Wu 
5063518df57SJiawen Wu 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
5073518df57SJiawen Wu 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
5083518df57SJiawen Wu 		     pci_dev->id.device_id);
5093518df57SJiawen Wu 
510b9246b8fSJiawen Wu 	rte_intr_callback_register(intr_handle,
511b9246b8fSJiawen Wu 				   ngbe_dev_interrupt_handler, eth_dev);
512b9246b8fSJiawen Wu 
513b9246b8fSJiawen Wu 	/* enable uio/vfio intr/eventfd mapping */
514b9246b8fSJiawen Wu 	rte_intr_enable(intr_handle);
515b9246b8fSJiawen Wu 
516b9246b8fSJiawen Wu 	/* enable support intr */
517b9246b8fSJiawen Wu 	ngbe_enable_intr(eth_dev);
518b9246b8fSJiawen Wu 
51968eb13a1SJiawen Wu 	return 0;
5206ee7e574SJiawen Wu }
5216ee7e574SJiawen Wu 
5226ee7e574SJiawen Wu static int
5236ee7e574SJiawen Wu eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
5246ee7e574SJiawen Wu {
525cc934df1SJiawen Wu 	PMD_INIT_FUNC_TRACE();
526cc934df1SJiawen Wu 
5276ee7e574SJiawen Wu 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
5286ee7e574SJiawen Wu 		return 0;
5296ee7e574SJiawen Wu 
53068eb13a1SJiawen Wu 	ngbe_dev_close(eth_dev);
5316ee7e574SJiawen Wu 
532cc63194eSJiawen Wu 	return 0;
5336ee7e574SJiawen Wu }
5346ee7e574SJiawen Wu 
5356ee7e574SJiawen Wu static int
5366ee7e574SJiawen Wu eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
53726590b52SJiawen Wu 		struct rte_pci_device *pci_dev)
53826590b52SJiawen Wu {
5396ee7e574SJiawen Wu 	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
54068eb13a1SJiawen Wu 			sizeof(struct ngbe_adapter),
54168eb13a1SJiawen Wu 			eth_dev_pci_specific_init, pci_dev,
5426ee7e574SJiawen Wu 			eth_ngbe_dev_init, NULL);
54326590b52SJiawen Wu }
54426590b52SJiawen Wu 
54526590b52SJiawen Wu static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
54626590b52SJiawen Wu {
5476ee7e574SJiawen Wu 	struct rte_eth_dev *ethdev;
5486ee7e574SJiawen Wu 
5496ee7e574SJiawen Wu 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
5506ee7e574SJiawen Wu 	if (ethdev == NULL)
5516ee7e574SJiawen Wu 		return 0;
5526ee7e574SJiawen Wu 
553c602202cSJiawen Wu 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ngbe_dev_uninit);
55426590b52SJiawen Wu }
55526590b52SJiawen Wu 
55626590b52SJiawen Wu static struct rte_pci_driver rte_ngbe_pmd = {
5576ee7e574SJiawen Wu 	.id_table = pci_id_ngbe_map,
558b9246b8fSJiawen Wu 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
559b9246b8fSJiawen Wu 		     RTE_PCI_DRV_INTR_LSC,
56026590b52SJiawen Wu 	.probe = eth_ngbe_pci_probe,
56126590b52SJiawen Wu 	.remove = eth_ngbe_pci_remove,
56226590b52SJiawen Wu };
56326590b52SJiawen Wu 
564b9246b8fSJiawen Wu static int
56559b46438SJiawen Wu ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
56659b46438SJiawen Wu {
56759b46438SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
56859b46438SJiawen Wu 	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
56959b46438SJiawen Wu 	uint32_t vfta;
57059b46438SJiawen Wu 	uint32_t vid_idx;
57159b46438SJiawen Wu 	uint32_t vid_bit;
57259b46438SJiawen Wu 
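	/*
	 * Each 32-bit VFTA register covers 32 VLAN IDs: bits [11:5] of the
	 * VLAN ID select the register, bits [4:0] select the bit within it.
	 */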
57359b46438SJiawen Wu 	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
57459b46438SJiawen Wu 	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
57559b46438SJiawen Wu 	vfta = rd32(hw, NGBE_VLANTBL(vid_idx));
57659b46438SJiawen Wu 	if (on)
57759b46438SJiawen Wu 		vfta |= vid_bit;
57859b46438SJiawen Wu 	else
57959b46438SJiawen Wu 		vfta &= ~vid_bit;
58059b46438SJiawen Wu 	wr32(hw, NGBE_VLANTBL(vid_idx), vfta);
58159b46438SJiawen Wu 
58259b46438SJiawen Wu 	/* update local VFTA copy */
58359b46438SJiawen Wu 	shadow_vfta->vfta[vid_idx] = vfta;
58459b46438SJiawen Wu 
58559b46438SJiawen Wu 	return 0;
58659b46438SJiawen Wu }
58759b46438SJiawen Wu 
58859b46438SJiawen Wu static void
589*baca8ec0SJiawen Wu ngbe_vlan_strip_q_set(struct rte_eth_dev *dev, uint16_t queue, int on)
59059b46438SJiawen Wu {
59159b46438SJiawen Wu 	if (on)
59259b46438SJiawen Wu 		ngbe_vlan_hw_strip_enable(dev, queue);
59359b46438SJiawen Wu 	else
59459b46438SJiawen Wu 		ngbe_vlan_hw_strip_disable(dev, queue);
59559b46438SJiawen Wu }
59659b46438SJiawen Wu 
597*baca8ec0SJiawen Wu static void
598*baca8ec0SJiawen Wu ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
599*baca8ec0SJiawen Wu {
600*baca8ec0SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
601*baca8ec0SJiawen Wu 
602*baca8ec0SJiawen Wu 	if (!hw->adapter_stopped) {
603*baca8ec0SJiawen Wu 		PMD_DRV_LOG(ERR, "Please stop port first");
604*baca8ec0SJiawen Wu 		return;
60559b46438SJiawen Wu 	}
606*baca8ec0SJiawen Wu 
607*baca8ec0SJiawen Wu 	ngbe_vlan_strip_q_set(dev, queue, on);
60859b46438SJiawen Wu }
60959b46438SJiawen Wu 
61059b46438SJiawen Wu static int
61159b46438SJiawen Wu ngbe_vlan_tpid_set(struct rte_eth_dev *dev,
61259b46438SJiawen Wu 		    enum rte_vlan_type vlan_type,
61359b46438SJiawen Wu 		    uint16_t tpid)
61459b46438SJiawen Wu {
61559b46438SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
61659b46438SJiawen Wu 	int ret = 0;
61759b46438SJiawen Wu 	uint32_t portctrl, vlan_ext, qinq;
61859b46438SJiawen Wu 
61959b46438SJiawen Wu 	portctrl = rd32(hw, NGBE_PORTCTL);
62059b46438SJiawen Wu 
62159b46438SJiawen Wu 	vlan_ext = (portctrl & NGBE_PORTCTL_VLANEXT);
62259b46438SJiawen Wu 	qinq = vlan_ext && (portctrl & NGBE_PORTCTL_QINQ);
62359b46438SJiawen Wu 	switch (vlan_type) {
62459b46438SJiawen Wu 	case RTE_ETH_VLAN_TYPE_INNER:
62559b46438SJiawen Wu 		if (vlan_ext) {
62659b46438SJiawen Wu 			wr32m(hw, NGBE_VLANCTL,
62759b46438SJiawen Wu 				NGBE_VLANCTL_TPID_MASK,
62859b46438SJiawen Wu 				NGBE_VLANCTL_TPID(tpid));
62959b46438SJiawen Wu 			wr32m(hw, NGBE_DMATXCTRL,
63059b46438SJiawen Wu 				NGBE_DMATXCTRL_TPID_MASK,
63159b46438SJiawen Wu 				NGBE_DMATXCTRL_TPID(tpid));
63259b46438SJiawen Wu 		} else {
63359b46438SJiawen Wu 			ret = -ENOTSUP;
63459b46438SJiawen Wu 			PMD_DRV_LOG(ERR,
63559b46438SJiawen Wu 				"Inner type is not supported by single VLAN");
63659b46438SJiawen Wu 		}
63759b46438SJiawen Wu 
63859b46438SJiawen Wu 		if (qinq) {
63959b46438SJiawen Wu 			wr32m(hw, NGBE_TAGTPID(0),
64059b46438SJiawen Wu 				NGBE_TAGTPID_LSB_MASK,
64159b46438SJiawen Wu 				NGBE_TAGTPID_LSB(tpid));
64259b46438SJiawen Wu 		}
64359b46438SJiawen Wu 		break;
64459b46438SJiawen Wu 	case RTE_ETH_VLAN_TYPE_OUTER:
64559b46438SJiawen Wu 		if (vlan_ext) {
64659b46438SJiawen Wu 			/* Only the high 16 bits are valid */
64759b46438SJiawen Wu 			wr32m(hw, NGBE_EXTAG,
64859b46438SJiawen Wu 				NGBE_EXTAG_VLAN_MASK,
64959b46438SJiawen Wu 				NGBE_EXTAG_VLAN(tpid));
65059b46438SJiawen Wu 		} else {
65159b46438SJiawen Wu 			wr32m(hw, NGBE_VLANCTL,
65259b46438SJiawen Wu 				NGBE_VLANCTL_TPID_MASK,
65359b46438SJiawen Wu 				NGBE_VLANCTL_TPID(tpid));
65459b46438SJiawen Wu 			wr32m(hw, NGBE_DMATXCTRL,
65559b46438SJiawen Wu 				NGBE_DMATXCTRL_TPID_MASK,
65659b46438SJiawen Wu 				NGBE_DMATXCTRL_TPID(tpid));
65759b46438SJiawen Wu 		}
65859b46438SJiawen Wu 
65959b46438SJiawen Wu 		if (qinq) {
66059b46438SJiawen Wu 			wr32m(hw, NGBE_TAGTPID(0),
66159b46438SJiawen Wu 				NGBE_TAGTPID_MSB_MASK,
66259b46438SJiawen Wu 				NGBE_TAGTPID_MSB(tpid));
66359b46438SJiawen Wu 		}
66459b46438SJiawen Wu 		break;
66559b46438SJiawen Wu 	default:
66659b46438SJiawen Wu 		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
66759b46438SJiawen Wu 		return -EINVAL;
66859b46438SJiawen Wu 	}
66959b46438SJiawen Wu 
67059b46438SJiawen Wu 	return ret;
67159b46438SJiawen Wu }
67259b46438SJiawen Wu 
67359b46438SJiawen Wu void
67459b46438SJiawen Wu ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
67559b46438SJiawen Wu {
67659b46438SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
67759b46438SJiawen Wu 	uint32_t vlnctrl;
67859b46438SJiawen Wu 
67959b46438SJiawen Wu 	PMD_INIT_FUNC_TRACE();
68059b46438SJiawen Wu 
68159b46438SJiawen Wu 	/* Filter Table Disable */
68259b46438SJiawen Wu 	vlnctrl = rd32(hw, NGBE_VLANCTL);
68359b46438SJiawen Wu 	vlnctrl &= ~NGBE_VLANCTL_VFE;
68459b46438SJiawen Wu 	wr32(hw, NGBE_VLANCTL, vlnctrl);
68559b46438SJiawen Wu }
68659b46438SJiawen Wu 
68759b46438SJiawen Wu void
68859b46438SJiawen Wu ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
68959b46438SJiawen Wu {
69059b46438SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
69159b46438SJiawen Wu 	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
69259b46438SJiawen Wu 	uint32_t vlnctrl;
69359b46438SJiawen Wu 	uint16_t i;
69459b46438SJiawen Wu 
69559b46438SJiawen Wu 	PMD_INIT_FUNC_TRACE();
69659b46438SJiawen Wu 
69759b46438SJiawen Wu 	/* Filter Table Enable */
69859b46438SJiawen Wu 	vlnctrl = rd32(hw, NGBE_VLANCTL);
69959b46438SJiawen Wu 	vlnctrl &= ~NGBE_VLANCTL_CFIENA;
70059b46438SJiawen Wu 	vlnctrl |= NGBE_VLANCTL_VFE;
70159b46438SJiawen Wu 	wr32(hw, NGBE_VLANCTL, vlnctrl);
70259b46438SJiawen Wu 
70359b46438SJiawen Wu 	/* write whatever is in local vfta copy */
70459b46438SJiawen Wu 	for (i = 0; i < NGBE_VFTA_SIZE; i++)
70559b46438SJiawen Wu 		wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
70659b46438SJiawen Wu }
70759b46438SJiawen Wu 
70859b46438SJiawen Wu void
70959b46438SJiawen Wu ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
71059b46438SJiawen Wu {
71159b46438SJiawen Wu 	struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(dev);
71259b46438SJiawen Wu 	struct ngbe_rx_queue *rxq;
71359b46438SJiawen Wu 
71459b46438SJiawen Wu 	if (queue >= NGBE_MAX_RX_QUEUE_NUM)
71559b46438SJiawen Wu 		return;
71659b46438SJiawen Wu 
71759b46438SJiawen Wu 	if (on)
71859b46438SJiawen Wu 		NGBE_SET_HWSTRIP(hwstrip, queue);
71959b46438SJiawen Wu 	else
72059b46438SJiawen Wu 		NGBE_CLEAR_HWSTRIP(hwstrip, queue);
72159b46438SJiawen Wu 
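	/* For a queue that is not set up yet, only the bitmap is updated */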
72259b46438SJiawen Wu 	if (queue >= dev->data->nb_rx_queues)
72359b46438SJiawen Wu 		return;
72459b46438SJiawen Wu 
72559b46438SJiawen Wu 	rxq = dev->data->rx_queues[queue];
72659b46438SJiawen Wu 
72759b46438SJiawen Wu 	if (on) {
72859b46438SJiawen Wu 		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
72959b46438SJiawen Wu 		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
73059b46438SJiawen Wu 	} else {
73159b46438SJiawen Wu 		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
73259b46438SJiawen Wu 		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
73359b46438SJiawen Wu 	}
73459b46438SJiawen Wu }
73559b46438SJiawen Wu 
73659b46438SJiawen Wu static void
73759b46438SJiawen Wu ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
73859b46438SJiawen Wu {
73959b46438SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
74059b46438SJiawen Wu 	uint32_t ctrl;
74159b46438SJiawen Wu 
74259b46438SJiawen Wu 	PMD_INIT_FUNC_TRACE();
74359b46438SJiawen Wu 
74459b46438SJiawen Wu 	ctrl = rd32(hw, NGBE_RXCFG(queue));
74559b46438SJiawen Wu 	ctrl &= ~NGBE_RXCFG_VLAN;
74659b46438SJiawen Wu 	wr32(hw, NGBE_RXCFG(queue), ctrl);
74759b46438SJiawen Wu 
74859b46438SJiawen Wu 	/* record this per-queue HW strip setting */
74959b46438SJiawen Wu 	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
75059b46438SJiawen Wu }
75159b46438SJiawen Wu 
75259b46438SJiawen Wu static void
75359b46438SJiawen Wu ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
75459b46438SJiawen Wu {
75559b46438SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
75659b46438SJiawen Wu 	uint32_t ctrl;
75759b46438SJiawen Wu 
75859b46438SJiawen Wu 	PMD_INIT_FUNC_TRACE();
75959b46438SJiawen Wu 
76059b46438SJiawen Wu 	ctrl = rd32(hw, NGBE_RXCFG(queue));
76159b46438SJiawen Wu 	ctrl |= NGBE_RXCFG_VLAN;
76259b46438SJiawen Wu 	wr32(hw, NGBE_RXCFG(queue), ctrl);
76359b46438SJiawen Wu 
76459b46438SJiawen Wu 	/* record this per-queue HW strip setting */
76559b46438SJiawen Wu 	ngbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
76659b46438SJiawen Wu }
76759b46438SJiawen Wu 
76859b46438SJiawen Wu static void
76959b46438SJiawen Wu ngbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
77059b46438SJiawen Wu {
77159b46438SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
77259b46438SJiawen Wu 	uint32_t ctrl;
77359b46438SJiawen Wu 
77459b46438SJiawen Wu 	PMD_INIT_FUNC_TRACE();
77559b46438SJiawen Wu 
77659b46438SJiawen Wu 	ctrl = rd32(hw, NGBE_PORTCTL);
77759b46438SJiawen Wu 	ctrl &= ~NGBE_PORTCTL_VLANEXT;
77859b46438SJiawen Wu 	ctrl &= ~NGBE_PORTCTL_QINQ;
77959b46438SJiawen Wu 	wr32(hw, NGBE_PORTCTL, ctrl);
78059b46438SJiawen Wu }
78159b46438SJiawen Wu 
78259b46438SJiawen Wu static void
78359b46438SJiawen Wu ngbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
78459b46438SJiawen Wu {
78559b46438SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
78659b46438SJiawen Wu 	uint32_t ctrl;
78759b46438SJiawen Wu 
78859b46438SJiawen Wu 	PMD_INIT_FUNC_TRACE();
78959b46438SJiawen Wu 
79059b46438SJiawen Wu 	ctrl  = rd32(hw, NGBE_PORTCTL);
79159b46438SJiawen Wu 	ctrl |= NGBE_PORTCTL_VLANEXT | NGBE_PORTCTL_QINQ;
79259b46438SJiawen Wu 	wr32(hw, NGBE_PORTCTL, ctrl);
79359b46438SJiawen Wu }
79459b46438SJiawen Wu 
79559b46438SJiawen Wu static void
79659b46438SJiawen Wu ngbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
79759b46438SJiawen Wu {
79859b46438SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
79959b46438SJiawen Wu 	uint32_t ctrl;
80059b46438SJiawen Wu 
80159b46438SJiawen Wu 	PMD_INIT_FUNC_TRACE();
80259b46438SJiawen Wu 
80359b46438SJiawen Wu 	ctrl = rd32(hw, NGBE_PORTCTL);
80459b46438SJiawen Wu 	ctrl &= ~NGBE_PORTCTL_QINQ;
80559b46438SJiawen Wu 	wr32(hw, NGBE_PORTCTL, ctrl);
80659b46438SJiawen Wu }
80759b46438SJiawen Wu 
80859b46438SJiawen Wu static void
80959b46438SJiawen Wu ngbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
81059b46438SJiawen Wu {
81159b46438SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
81259b46438SJiawen Wu 	uint32_t ctrl;
81359b46438SJiawen Wu 
81459b46438SJiawen Wu 	PMD_INIT_FUNC_TRACE();
81559b46438SJiawen Wu 
81659b46438SJiawen Wu 	ctrl  = rd32(hw, NGBE_PORTCTL);
81759b46438SJiawen Wu 	ctrl |= NGBE_PORTCTL_QINQ | NGBE_PORTCTL_VLANEXT;
81859b46438SJiawen Wu 	wr32(hw, NGBE_PORTCTL, ctrl);
81959b46438SJiawen Wu }
82059b46438SJiawen Wu 
82159b46438SJiawen Wu void
82259b46438SJiawen Wu ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
82359b46438SJiawen Wu {
82459b46438SJiawen Wu 	struct ngbe_rx_queue *rxq;
82559b46438SJiawen Wu 	uint16_t i;
82659b46438SJiawen Wu 
82759b46438SJiawen Wu 	PMD_INIT_FUNC_TRACE();
82859b46438SJiawen Wu 
82959b46438SJiawen Wu 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
83059b46438SJiawen Wu 		rxq = dev->data->rx_queues[i];
83159b46438SJiawen Wu 
83259b46438SJiawen Wu 		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
833*baca8ec0SJiawen Wu 			ngbe_vlan_strip_q_set(dev, i, 1);
83459b46438SJiawen Wu 		else
835*baca8ec0SJiawen Wu 			ngbe_vlan_strip_q_set(dev, i, 0);
83659b46438SJiawen Wu 	}
83759b46438SJiawen Wu }
83859b46438SJiawen Wu 
83959b46438SJiawen Wu void
84059b46438SJiawen Wu ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
84159b46438SJiawen Wu {
84259b46438SJiawen Wu 	uint16_t i;
84359b46438SJiawen Wu 	struct rte_eth_rxmode *rxmode;
84459b46438SJiawen Wu 	struct ngbe_rx_queue *rxq;
84559b46438SJiawen Wu 
84659b46438SJiawen Wu 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
84759b46438SJiawen Wu 		rxmode = &dev->data->dev_conf.rxmode;
84859b46438SJiawen Wu 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
84959b46438SJiawen Wu 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
85059b46438SJiawen Wu 				rxq = dev->data->rx_queues[i];
85159b46438SJiawen Wu 				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
85259b46438SJiawen Wu 			}
85359b46438SJiawen Wu 		else
85459b46438SJiawen Wu 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
85559b46438SJiawen Wu 				rxq = dev->data->rx_queues[i];
85659b46438SJiawen Wu 				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
85759b46438SJiawen Wu 			}
85859b46438SJiawen Wu 	}
85959b46438SJiawen Wu }
86059b46438SJiawen Wu 
86159b46438SJiawen Wu static int
86259b46438SJiawen Wu ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
86359b46438SJiawen Wu {
86459b46438SJiawen Wu 	struct rte_eth_rxmode *rxmode;
86559b46438SJiawen Wu 	rxmode = &dev->data->dev_conf.rxmode;
86659b46438SJiawen Wu 
86759b46438SJiawen Wu 	if (mask & RTE_ETH_VLAN_STRIP_MASK)
86859b46438SJiawen Wu 		ngbe_vlan_hw_strip_config(dev);
86959b46438SJiawen Wu 
87059b46438SJiawen Wu 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
87159b46438SJiawen Wu 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
87259b46438SJiawen Wu 			ngbe_vlan_hw_filter_enable(dev);
87359b46438SJiawen Wu 		else
87459b46438SJiawen Wu 			ngbe_vlan_hw_filter_disable(dev);
87559b46438SJiawen Wu 	}
87659b46438SJiawen Wu 
87759b46438SJiawen Wu 	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
87859b46438SJiawen Wu 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
87959b46438SJiawen Wu 			ngbe_vlan_hw_extend_enable(dev);
88059b46438SJiawen Wu 		else
88159b46438SJiawen Wu 			ngbe_vlan_hw_extend_disable(dev);
88259b46438SJiawen Wu 	}
88359b46438SJiawen Wu 
88459b46438SJiawen Wu 	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
88559b46438SJiawen Wu 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
88659b46438SJiawen Wu 			ngbe_qinq_hw_strip_enable(dev);
88759b46438SJiawen Wu 		else
88859b46438SJiawen Wu 			ngbe_qinq_hw_strip_disable(dev);
88959b46438SJiawen Wu 	}
89059b46438SJiawen Wu 
89159b46438SJiawen Wu 	return 0;
89259b46438SJiawen Wu }
89359b46438SJiawen Wu 
89459b46438SJiawen Wu static int
89559b46438SJiawen Wu ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
89659b46438SJiawen Wu {
897*baca8ec0SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
898*baca8ec0SJiawen Wu 
899*baca8ec0SJiawen Wu 	if (!hw->adapter_stopped && (mask & RTE_ETH_VLAN_STRIP_MASK)) {
900*baca8ec0SJiawen Wu 		PMD_DRV_LOG(ERR, "Please stop port first");
901*baca8ec0SJiawen Wu 		return -EPERM;
902*baca8ec0SJiawen Wu 	}
903*baca8ec0SJiawen Wu 
90459b46438SJiawen Wu 	ngbe_config_vlan_strip_on_all_queues(dev, mask);
90559b46438SJiawen Wu 
90659b46438SJiawen Wu 	ngbe_vlan_offload_config(dev, mask);
90759b46438SJiawen Wu 
90859b46438SJiawen Wu 	return 0;
90959b46438SJiawen Wu }
91059b46438SJiawen Wu 
91159b46438SJiawen Wu static int
912b9246b8fSJiawen Wu ngbe_dev_configure(struct rte_eth_dev *dev)
913b9246b8fSJiawen Wu {
914b9246b8fSJiawen Wu 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
91543b7e5eaSJiawen Wu 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
916b9246b8fSJiawen Wu 
917b9246b8fSJiawen Wu 	PMD_INIT_FUNC_TRACE();
918b9246b8fSJiawen Wu 
9190779d7f6SJiawen Wu 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
9200779d7f6SJiawen Wu 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
9210779d7f6SJiawen Wu 
922b9246b8fSJiawen Wu 	/* set flag to update link status after init */
923b9246b8fSJiawen Wu 	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
924b9246b8fSJiawen Wu 
92543b7e5eaSJiawen Wu 	/*
92643b7e5eaSJiawen Wu 	 * Initialize to TRUE. If any Rx queue does not meet the bulk
92743b7e5eaSJiawen Wu 	 * allocation preconditions, it will be reset.
92843b7e5eaSJiawen Wu 	 */
92943b7e5eaSJiawen Wu 	adapter->rx_bulk_alloc_allowed = true;
930e94c20c3SJiawen Wu 	adapter->rx_vec_allowed = true;
93143b7e5eaSJiawen Wu 
932b9246b8fSJiawen Wu 	return 0;
933b9246b8fSJiawen Wu }
934b9246b8fSJiawen Wu 
9353518df57SJiawen Wu static void
9363518df57SJiawen Wu ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
9373518df57SJiawen Wu {
9383518df57SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
9393518df57SJiawen Wu 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
9403518df57SJiawen Wu 
9413518df57SJiawen Wu 	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
9423518df57SJiawen Wu 	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
9433518df57SJiawen Wu 	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
9443518df57SJiawen Wu 	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
9453518df57SJiawen Wu 		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
9463518df57SJiawen Wu 	else
9473518df57SJiawen Wu 		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));
9483518df57SJiawen Wu 
949f3ff9f28SJiawen Wu 	intr->mask_misc |= NGBE_ICRMISC_GPIO | NGBE_ICRMISC_HEAT;
9503518df57SJiawen Wu }
9513518df57SJiawen Wu 
9523518df57SJiawen Wu /*
9533518df57SJiawen Wu  * Configure device link speed and setup link.
9543518df57SJiawen Wu  * It returns 0 on success.
9553518df57SJiawen Wu  */
9563518df57SJiawen Wu static int
9573518df57SJiawen Wu ngbe_dev_start(struct rte_eth_dev *dev)
9583518df57SJiawen Wu {
9593518df57SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
960fdb1e851SJiawen Wu 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
9613518df57SJiawen Wu 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
962d61138d4SHarman Kalra 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
9633518df57SJiawen Wu 	uint32_t intr_vector = 0;
9643518df57SJiawen Wu 	int err;
9653518df57SJiawen Wu 	bool link_up = false, negotiate = false;
9663518df57SJiawen Wu 	uint32_t speed = 0;
9673518df57SJiawen Wu 	uint32_t allowed_speeds = 0;
96859b46438SJiawen Wu 	int mask = 0;
9693518df57SJiawen Wu 	int status;
9703518df57SJiawen Wu 	uint32_t *link_speeds;
9713518df57SJiawen Wu 
9723518df57SJiawen Wu 	PMD_INIT_FUNC_TRACE();
9733518df57SJiawen Wu 
9743518df57SJiawen Wu 	/* disable uio/vfio intr/eventfd mapping */
9753518df57SJiawen Wu 	rte_intr_disable(intr_handle);
9763518df57SJiawen Wu 
9773518df57SJiawen Wu 	/* stop adapter */
9783518df57SJiawen Wu 	hw->adapter_stopped = 0;
9793518df57SJiawen Wu 
9803518df57SJiawen Wu 	/* reinitialize adapter, this calls reset and start */
9813518df57SJiawen Wu 	hw->nb_rx_queues = dev->data->nb_rx_queues;
9823518df57SJiawen Wu 	hw->nb_tx_queues = dev->data->nb_tx_queues;
9833518df57SJiawen Wu 	status = ngbe_pf_reset_hw(hw);
9843518df57SJiawen Wu 	if (status != 0)
9853518df57SJiawen Wu 		return -1;
9863518df57SJiawen Wu 	hw->mac.start_hw(hw);
9873518df57SJiawen Wu 	hw->mac.get_link_status = true;
9883518df57SJiawen Wu 
989ac6c5e9aSJiawen Wu 	ngbe_set_pcie_master(hw, true);
990ac6c5e9aSJiawen Wu 
99160229dcfSJiawen Wu 	/* configure PF module if SRIOV enabled */
99260229dcfSJiawen Wu 	ngbe_pf_host_configure(dev);
99360229dcfSJiawen Wu 
9943518df57SJiawen Wu 	ngbe_dev_phy_intr_setup(dev);
9953518df57SJiawen Wu 
9963518df57SJiawen Wu 	/* check and configure queue intr-vector mapping */
9973518df57SJiawen Wu 	if ((rte_intr_cap_multiple(intr_handle) ||
9983518df57SJiawen Wu 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
9993518df57SJiawen Wu 	    dev->data->dev_conf.intr_conf.rxq != 0) {
10003518df57SJiawen Wu 		intr_vector = dev->data->nb_rx_queues;
10013518df57SJiawen Wu 		if (rte_intr_efd_enable(intr_handle, intr_vector))
10023518df57SJiawen Wu 			return -1;
10033518df57SJiawen Wu 	}
10043518df57SJiawen Wu 
1005d61138d4SHarman Kalra 	if (rte_intr_dp_is_en(intr_handle)) {
1006d61138d4SHarman Kalra 		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
1007d61138d4SHarman Kalra 						   dev->data->nb_rx_queues)) {
10083518df57SJiawen Wu 			PMD_INIT_LOG(ERR,
10093518df57SJiawen Wu 				     "Failed to allocate %d rx_queues intr_vec",
10103518df57SJiawen Wu 				     dev->data->nb_rx_queues);
10113518df57SJiawen Wu 			return -ENOMEM;
10123518df57SJiawen Wu 		}
10133518df57SJiawen Wu 	}
10143518df57SJiawen Wu 
10157be78d02SJosh Soref 	/* configure MSI-X for sleep until Rx interrupt */
10163518df57SJiawen Wu 	ngbe_configure_msix(dev);
10173518df57SJiawen Wu 
10183518df57SJiawen Wu 	/* initialize transmission unit */
10193518df57SJiawen Wu 	ngbe_dev_tx_init(dev);
10203518df57SJiawen Wu 
10213518df57SJiawen Wu 	/* This can fail when allocating mbufs for descriptor rings */
10223518df57SJiawen Wu 	err = ngbe_dev_rx_init(dev);
10233518df57SJiawen Wu 	if (err != 0) {
10243518df57SJiawen Wu 		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
10253518df57SJiawen Wu 		goto error;
10263518df57SJiawen Wu 	}
10273518df57SJiawen Wu 
102859b46438SJiawen Wu 	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
102959b46438SJiawen Wu 		RTE_ETH_VLAN_EXTEND_MASK;
103059b46438SJiawen Wu 	err = ngbe_vlan_offload_config(dev, mask);
103159b46438SJiawen Wu 	if (err != 0) {
103259b46438SJiawen Wu 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
103359b46438SJiawen Wu 		goto error;
103459b46438SJiawen Wu 	}
103559b46438SJiawen Wu 
1036ccdeca8eSJiawen Wu 	hw->mac.setup_pba(hw);
1037d19fa5a1SJiawen Wu 	ngbe_pbthresh_set(dev);
103859b46438SJiawen Wu 	ngbe_configure_port(dev);
103959b46438SJiawen Wu 
10403518df57SJiawen Wu 	err = ngbe_dev_rxtx_start(dev);
10413518df57SJiawen Wu 	if (err < 0) {
10423518df57SJiawen Wu 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
10433518df57SJiawen Wu 		goto error;
10443518df57SJiawen Wu 	}
10453518df57SJiawen Wu 
1046d4a3fe69SJiawen Wu 	/* Skip link setup if loopback mode is enabled. */
1047d4a3fe69SJiawen Wu 	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
1048d4a3fe69SJiawen Wu 		goto skip_link_setup;
1049d4a3fe69SJiawen Wu 
105021f702d5SJiawen Wu 	hw->lsc = dev->data->dev_conf.intr_conf.lsc;
105121f702d5SJiawen Wu 
10523518df57SJiawen Wu 	err = hw->mac.check_link(hw, &speed, &link_up, 0);
10533518df57SJiawen Wu 	if (err != 0)
10543518df57SJiawen Wu 		goto error;
10553518df57SJiawen Wu 	dev->data->dev_link.link_status = link_up;
10563518df57SJiawen Wu 
10573518df57SJiawen Wu 	link_speeds = &dev->data->dev_conf.link_speeds;
1058295968d1SFerruh Yigit 	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
10593518df57SJiawen Wu 		negotiate = true;
10603518df57SJiawen Wu 
10613518df57SJiawen Wu 	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
10623518df57SJiawen Wu 	if (err != 0)
10633518df57SJiawen Wu 		goto error;
10643518df57SJiawen Wu 
10653518df57SJiawen Wu 	allowed_speeds = 0;
10663518df57SJiawen Wu 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
1067295968d1SFerruh Yigit 		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
10683518df57SJiawen Wu 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
1069295968d1SFerruh Yigit 		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
10703518df57SJiawen Wu 	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
1071295968d1SFerruh Yigit 		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;
10723518df57SJiawen Wu 
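	/* Drop bit 0 (the fixed-speed flag) so only speed bits are compared */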
1073bf1bc993SJiawen Wu 	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
10743518df57SJiawen Wu 		PMD_INIT_LOG(ERR, "Invalid link setting");
10753518df57SJiawen Wu 		goto error;
10763518df57SJiawen Wu 	}
10773518df57SJiawen Wu 
10783518df57SJiawen Wu 	speed = 0x0;
1079295968d1SFerruh Yigit 	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
10803518df57SJiawen Wu 		speed = hw->mac.default_speeds;
10813518df57SJiawen Wu 	} else {
1082295968d1SFerruh Yigit 		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
10833518df57SJiawen Wu 			speed |= NGBE_LINK_SPEED_1GB_FULL;
1084295968d1SFerruh Yigit 		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
10853518df57SJiawen Wu 			speed |= NGBE_LINK_SPEED_100M_FULL;
1086295968d1SFerruh Yigit 		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
10873518df57SJiawen Wu 			speed |= NGBE_LINK_SPEED_10M_FULL;
10883518df57SJiawen Wu 	}
10893518df57SJiawen Wu 
10905f1ab0d5SJiawen Wu 	if (!hw->ncsi_enabled) {
109191bc12c5SJiawen Wu 		err = hw->phy.init_hw(hw);
109291bc12c5SJiawen Wu 		if (err != 0) {
109391bc12c5SJiawen Wu 			PMD_INIT_LOG(ERR, "PHY init failed");
109491bc12c5SJiawen Wu 			goto error;
109591bc12c5SJiawen Wu 		}
10965f1ab0d5SJiawen Wu 	}
10973518df57SJiawen Wu 	err = hw->mac.setup_link(hw, speed, link_up);
10983518df57SJiawen Wu 	if (err != 0)
10993518df57SJiawen Wu 		goto error;
11003518df57SJiawen Wu 
1101d4a3fe69SJiawen Wu skip_link_setup:
1102d4a3fe69SJiawen Wu 
11033518df57SJiawen Wu 	if (rte_intr_allow_others(intr_handle)) {
11043518df57SJiawen Wu 		ngbe_dev_misc_interrupt_setup(dev);
11053518df57SJiawen Wu 		/* check if lsc interrupt is enabled */
11063518df57SJiawen Wu 		if (dev->data->dev_conf.intr_conf.lsc != 0)
11073518df57SJiawen Wu 			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
11083518df57SJiawen Wu 		else
11093518df57SJiawen Wu 			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
11103518df57SJiawen Wu 		ngbe_dev_macsec_interrupt_setup(dev);
11113518df57SJiawen Wu 		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
11123518df57SJiawen Wu 	} else {
11133518df57SJiawen Wu 		rte_intr_callback_unregister(intr_handle,
11143518df57SJiawen Wu 					     ngbe_dev_interrupt_handler, dev);
11153518df57SJiawen Wu 		if (dev->data->dev_conf.intr_conf.lsc != 0)
11163518df57SJiawen Wu 			PMD_INIT_LOG(INFO,
11173518df57SJiawen Wu 				     "LSC won't enable because of no intr multiplex");
11183518df57SJiawen Wu 	}
11193518df57SJiawen Wu 
11203518df57SJiawen Wu 	/* check if rxq interrupt is enabled */
11213518df57SJiawen Wu 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
11223518df57SJiawen Wu 	    rte_intr_dp_is_en(intr_handle))
11233518df57SJiawen Wu 		ngbe_dev_rxq_interrupt_setup(dev);
11243518df57SJiawen Wu 
11253518df57SJiawen Wu 	/* enable UIO/VFIO intr/eventfd mapping */
11263518df57SJiawen Wu 	rte_intr_enable(intr_handle);
11273518df57SJiawen Wu 
11283518df57SJiawen Wu 	/* resume enabled intr since HW reset */
11293518df57SJiawen Wu 	ngbe_enable_intr(dev);
11303518df57SJiawen Wu 
11311c44384fSJiawen Wu 	if (hw->gpio_ctl) {
11323518df57SJiawen Wu 		/* GPIO0 is used for power on/off control */
11333518df57SJiawen Wu 		wr32(hw, NGBE_GPIODATA, 0);
11343518df57SJiawen Wu 	}
11353518df57SJiawen Wu 
11363518df57SJiawen Wu 	/*
11373518df57SJiawen Wu 	 * Update link status right before returning, because it may
11383518df57SJiawen Wu 	 * start the link configuration process in a separate thread.
11393518df57SJiawen Wu 	 */
11403518df57SJiawen Wu 	ngbe_dev_link_update(dev, 0);
11413518df57SJiawen Wu 
1142fdb1e851SJiawen Wu 	ngbe_read_stats_registers(hw, hw_stats);
1143fdb1e851SJiawen Wu 	hw->offset_loaded = 1;
1144fdb1e851SJiawen Wu 
11453518df57SJiawen Wu 	return 0;
11463518df57SJiawen Wu 
11473518df57SJiawen Wu error:
11483518df57SJiawen Wu 	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
11493518df57SJiawen Wu 	ngbe_dev_clear_queues(dev);
11503518df57SJiawen Wu 	return -EIO;
11513518df57SJiawen Wu }
11523518df57SJiawen Wu 
11533518df57SJiawen Wu /*
11543518df57SJiawen Wu  * Stop device: disable rx and tx functions to allow for reconfiguring.
11553518df57SJiawen Wu  */
11563518df57SJiawen Wu static int
11573518df57SJiawen Wu ngbe_dev_stop(struct rte_eth_dev *dev)
11583518df57SJiawen Wu {
11593518df57SJiawen Wu 	struct rte_eth_link link;
11600779d7f6SJiawen Wu 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
11613518df57SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
116260229dcfSJiawen Wu 	struct ngbe_vf_info *vfinfo = *NGBE_DEV_VFDATA(dev);
11633518df57SJiawen Wu 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1164d61138d4SHarman Kalra 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
116560229dcfSJiawen Wu 	int vf;
11663518df57SJiawen Wu 
11673518df57SJiawen Wu 	if (hw->adapter_stopped)
11683dd64cf8SJiawen Wu 		goto out;
11693518df57SJiawen Wu 
11703518df57SJiawen Wu 	PMD_INIT_FUNC_TRACE();
11713518df57SJiawen Wu 
11721c44384fSJiawen Wu 	if (hw->gpio_ctl) {
11733518df57SJiawen Wu 		/* GPIO0 is used for power on/off control */
11743518df57SJiawen Wu 		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
11753518df57SJiawen Wu 	}
11763518df57SJiawen Wu 
11773518df57SJiawen Wu 	/* disable interrupts */
11783518df57SJiawen Wu 	ngbe_disable_intr(hw);
11793518df57SJiawen Wu 
11803518df57SJiawen Wu 	/* reset the NIC */
11813518df57SJiawen Wu 	ngbe_pf_reset_hw(hw);
11823518df57SJiawen Wu 	hw->adapter_stopped = 0;
11833518df57SJiawen Wu 
11843518df57SJiawen Wu 	/* stop adapter */
11853518df57SJiawen Wu 	ngbe_stop_hw(hw);
11863518df57SJiawen Wu 
118760229dcfSJiawen Wu 	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
118860229dcfSJiawen Wu 		vfinfo[vf].clear_to_send = false;
118960229dcfSJiawen Wu 
11903518df57SJiawen Wu 	ngbe_dev_clear_queues(dev);
11913518df57SJiawen Wu 
119279f3128dSJiawen Wu 	/* Clear stored conf */
119379f3128dSJiawen Wu 	dev->data->scattered_rx = 0;
119479f3128dSJiawen Wu 
11953518df57SJiawen Wu 	/* Clear recorded link status */
11963518df57SJiawen Wu 	memset(&link, 0, sizeof(link));
11973518df57SJiawen Wu 	rte_eth_linkstatus_set(dev, &link);
11983518df57SJiawen Wu 
11993518df57SJiawen Wu 	if (!rte_intr_allow_others(intr_handle))
12003518df57SJiawen Wu 		/* resume to the default handler */
12013518df57SJiawen Wu 		rte_intr_callback_register(intr_handle,
12023518df57SJiawen Wu 					   ngbe_dev_interrupt_handler,
12033518df57SJiawen Wu 					   (void *)dev);
12043518df57SJiawen Wu 
12053518df57SJiawen Wu 	/* Clean datapath event and queue/vec mapping */
12063518df57SJiawen Wu 	rte_intr_efd_disable(intr_handle);
1207d61138d4SHarman Kalra 	rte_intr_vec_list_free(intr_handle);
12083518df57SJiawen Wu 
1209ac6c5e9aSJiawen Wu 	ngbe_set_pcie_master(hw, true);
1210ac6c5e9aSJiawen Wu 
12110779d7f6SJiawen Wu 	adapter->rss_reta_updated = 0;
12120779d7f6SJiawen Wu 
12133518df57SJiawen Wu 	hw->adapter_stopped = true;
12143518df57SJiawen Wu 	dev->data->dev_started = 0;
12153518df57SJiawen Wu 
12163dd64cf8SJiawen Wu out:
12173dd64cf8SJiawen Wu 	/* power off the PHY so the reset in dev_close does not restart the physical link */
12185f1ab0d5SJiawen Wu 	if (!(hw->wol_enabled || hw->ncsi_enabled))
12193dd64cf8SJiawen Wu 		hw->phy.set_phy_power(hw, false);
12203dd64cf8SJiawen Wu 
12213518df57SJiawen Wu 	return 0;
12223518df57SJiawen Wu }
12233518df57SJiawen Wu 
122468eb13a1SJiawen Wu /*
1225abea8974SJiawen Wu  * Set device link up: power on.
1226abea8974SJiawen Wu  */
1227abea8974SJiawen Wu static int
1228abea8974SJiawen Wu ngbe_dev_set_link_up(struct rte_eth_dev *dev)
1229abea8974SJiawen Wu {
1230abea8974SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1231abea8974SJiawen Wu 
12325f1ab0d5SJiawen Wu 	if (!(hw->ncsi_enabled || hw->wol_enabled))
1233abea8974SJiawen Wu 		hw->phy.set_phy_power(hw, true);
1234abea8974SJiawen Wu 
1235abea8974SJiawen Wu 	return 0;
1236abea8974SJiawen Wu }
1237abea8974SJiawen Wu 
1238abea8974SJiawen Wu /*
1239abea8974SJiawen Wu  * Set device link down: power off.
1240abea8974SJiawen Wu  */
1241abea8974SJiawen Wu static int
1242abea8974SJiawen Wu ngbe_dev_set_link_down(struct rte_eth_dev *dev)
1243abea8974SJiawen Wu {
1244abea8974SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1245abea8974SJiawen Wu 
12465f1ab0d5SJiawen Wu 	if (!(hw->ncsi_enabled || hw->wol_enabled))
1247abea8974SJiawen Wu 		hw->phy.set_phy_power(hw, false);
1248abea8974SJiawen Wu 
1249abea8974SJiawen Wu 	return 0;
1250abea8974SJiawen Wu }
1251abea8974SJiawen Wu 
1252abea8974SJiawen Wu /*
125368eb13a1SJiawen Wu  * Reset and stop device.
125468eb13a1SJiawen Wu  */
125568eb13a1SJiawen Wu static int
125668eb13a1SJiawen Wu ngbe_dev_close(struct rte_eth_dev *dev)
125768eb13a1SJiawen Wu {
1258cc63194eSJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1259cc63194eSJiawen Wu 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1260d61138d4SHarman Kalra 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1261cc63194eSJiawen Wu 	int retries = 0;
1262cc63194eSJiawen Wu 	int ret;
1263cc63194eSJiawen Wu 
126468eb13a1SJiawen Wu 	PMD_INIT_FUNC_TRACE();
126568eb13a1SJiawen Wu 
1266bf86fb0bSJiawen Wu 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1267bf86fb0bSJiawen Wu 		return 0;
1268bf86fb0bSJiawen Wu 
1269cc63194eSJiawen Wu 	ngbe_pf_reset_hw(hw);
127068eb13a1SJiawen Wu 
1271cc63194eSJiawen Wu 	ngbe_dev_stop(dev);
1272cc63194eSJiawen Wu 
1273cb7be5b5SJiawen Wu 	/* Let firmware take over control of hardware */
1274cb7be5b5SJiawen Wu 	wr32m(hw, NGBE_PORTCTL, NGBE_PORTCTL_DRVLOAD, 0);
1275cb7be5b5SJiawen Wu 
1276cc63194eSJiawen Wu 	ngbe_dev_free_queues(dev);
1277cc63194eSJiawen Wu 
1278ac6c5e9aSJiawen Wu 	ngbe_set_pcie_master(hw, false);
1279ac6c5e9aSJiawen Wu 
1280cc63194eSJiawen Wu 	/* reprogram the RAR[0] in case user changed it. */
1281cc63194eSJiawen Wu 	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1282cc63194eSJiawen Wu 
1283cc63194eSJiawen Wu 	/* Unlock any pending hardware semaphore */
1284cc63194eSJiawen Wu 	ngbe_swfw_lock_reset(hw);
1285cc63194eSJiawen Wu 
1286cc63194eSJiawen Wu 	/* disable uio intr before callback unregister */
1287cc63194eSJiawen Wu 	rte_intr_disable(intr_handle);
1288cc63194eSJiawen Wu 
1289cc63194eSJiawen Wu 	do {
1290cc63194eSJiawen Wu 		ret = rte_intr_callback_unregister(intr_handle,
1291cc63194eSJiawen Wu 				ngbe_dev_interrupt_handler, dev);
1292cc63194eSJiawen Wu 		if (ret >= 0 || ret == -ENOENT) {
1293cc63194eSJiawen Wu 			break;
1294cc63194eSJiawen Wu 		} else if (ret != -EAGAIN) {
1295cc63194eSJiawen Wu 			PMD_INIT_LOG(ERR,
1296cc63194eSJiawen Wu 				"intr callback unregister failed: %d",
1297cc63194eSJiawen Wu 				ret);
1298cc63194eSJiawen Wu 		}
1299cc63194eSJiawen Wu 		rte_delay_ms(100);
1300cc63194eSJiawen Wu 	} while (retries++ < (10 + NGBE_LINK_UP_TIME));
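	/*
	 * Note: rte_intr_callback_unregister() returns -EAGAIN transiently
	 * (typically while the interrupt callback is still executing), which
	 * is why the bounded retry loop above only logs other failures.
	 */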
1301cc63194eSJiawen Wu 
130260229dcfSJiawen Wu 	/* uninitialize the PF if max_vfs is not zero */
130360229dcfSJiawen Wu 	ngbe_pf_host_uninit(dev);
130460229dcfSJiawen Wu 
1305cc63194eSJiawen Wu 	rte_free(dev->data->mac_addrs);
1306cc63194eSJiawen Wu 	dev->data->mac_addrs = NULL;
1307cc63194eSJiawen Wu 
1308cc63194eSJiawen Wu 	rte_free(dev->data->hash_mac_addrs);
1309cc63194eSJiawen Wu 	dev->data->hash_mac_addrs = NULL;
1310cc63194eSJiawen Wu 
1311cc63194eSJiawen Wu 	return ret;
1312cc63194eSJiawen Wu }
1313cc63194eSJiawen Wu 
1314cc63194eSJiawen Wu /*
1315cc63194eSJiawen Wu  * Reset PF device.
1316cc63194eSJiawen Wu  */
1317cc63194eSJiawen Wu static int
1318cc63194eSJiawen Wu ngbe_dev_reset(struct rte_eth_dev *dev)
1319cc63194eSJiawen Wu {
1320cc63194eSJiawen Wu 	int ret;
1321cc63194eSJiawen Wu 
132260229dcfSJiawen Wu 	/* When a DPDK PMD PF begins to reset the PF port, it should notify all
132360229dcfSJiawen Wu 	 * its VFs so that they stay aligned with it. The detailed notification
132460229dcfSJiawen Wu 	 * mechanism is PMD specific; for the ngbe PF it is rather complex. To
132560229dcfSJiawen Wu 	 * avoid unexpected behavior in the VFs, resetting the PF while SR-IOV
132660229dcfSJiawen Wu 	 * is active is currently not supported. It might be supported later.
132760229dcfSJiawen Wu 	 */
132860229dcfSJiawen Wu 	if (dev->data->sriov.active)
132960229dcfSJiawen Wu 		return -ENOTSUP;
133060229dcfSJiawen Wu 
1331cc63194eSJiawen Wu 	ret = eth_ngbe_dev_uninit(dev);
1332cc63194eSJiawen Wu 	if (ret != 0)
1333cc63194eSJiawen Wu 		return ret;
1334cc63194eSJiawen Wu 
1335cc63194eSJiawen Wu 	ret = eth_ngbe_dev_init(dev, NULL);
1336cc63194eSJiawen Wu 
1337cc63194eSJiawen Wu 	return ret;
133868eb13a1SJiawen Wu }
133968eb13a1SJiawen Wu 
1340fdb1e851SJiawen Wu #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
1341fdb1e851SJiawen Wu 	{                                                       \
1342fdb1e851SJiawen Wu 		uint32_t current_counter = rd32(hw, reg);       \
1343fdb1e851SJiawen Wu 		if (current_counter < last_counter)             \
1344fdb1e851SJiawen Wu 			current_counter += 0x100000000LL;       \
1345fdb1e851SJiawen Wu 		if (!hw->offset_loaded)                         \
1346fdb1e851SJiawen Wu 			last_counter = current_counter;         \
1347fdb1e851SJiawen Wu 		counter = current_counter - last_counter;       \
1348fdb1e851SJiawen Wu 		counter &= 0xFFFFFFFFLL;                        \
1349fdb1e851SJiawen Wu 	}
1350fdb1e851SJiawen Wu 
1351fdb1e851SJiawen Wu #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1352fdb1e851SJiawen Wu 	{                                                                \
1353fdb1e851SJiawen Wu 		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
1354fdb1e851SJiawen Wu 		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
1355fdb1e851SJiawen Wu 		uint64_t current_counter = (current_counter_msb << 32) | \
1356fdb1e851SJiawen Wu 			current_counter_lsb;                             \
1357fdb1e851SJiawen Wu 		if (current_counter < last_counter)                      \
1358fdb1e851SJiawen Wu 			current_counter += 0x1000000000LL;               \
1359fdb1e851SJiawen Wu 		if (!hw->offset_loaded)                                  \
1360fdb1e851SJiawen Wu 			last_counter = current_counter;                  \
1361fdb1e851SJiawen Wu 		counter = current_counter - last_counter;                \
1362fdb1e851SJiawen Wu 		counter &= 0xFFFFFFFFFLL;                                \
1363fdb1e851SJiawen Wu 	}
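/*
 * Note on the two helpers above: the per-queue hardware counters are
 * free-running and only 32 (or 36) bits wide, so every read computes the
 * delta against the last snapshot and masks it back to the counter width,
 * which stays correct across a single wrap. Illustrative 32-bit example
 * (assumed values): last_counter = 0xFFFFFFF0 and a new reading of
 * 0x00000010 yield a masked delta of 0x20, i.e. 32 events since the last
 * read. When offset_loaded is 0 (first read after a reset), the snapshot
 * is simply re-based and the reported delta is 0.
 */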
1364fdb1e851SJiawen Wu 
1365fdb1e851SJiawen Wu void
1366fdb1e851SJiawen Wu ngbe_read_stats_registers(struct ngbe_hw *hw,
1367fdb1e851SJiawen Wu 			   struct ngbe_hw_stats *hw_stats)
1368fdb1e851SJiawen Wu {
1369fdb1e851SJiawen Wu 	unsigned int i;
1370fdb1e851SJiawen Wu 
1371fdb1e851SJiawen Wu 	/* QP Stats */
1372fdb1e851SJiawen Wu 	for (i = 0; i < hw->nb_rx_queues; i++) {
1373fdb1e851SJiawen Wu 		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
1374fdb1e851SJiawen Wu 			hw->qp_last[i].rx_qp_packets,
1375fdb1e851SJiawen Wu 			hw_stats->qp[i].rx_qp_packets);
1376fdb1e851SJiawen Wu 		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
1377fdb1e851SJiawen Wu 			hw->qp_last[i].rx_qp_bytes,
1378fdb1e851SJiawen Wu 			hw_stats->qp[i].rx_qp_bytes);
1379fdb1e851SJiawen Wu 		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
1380fdb1e851SJiawen Wu 			hw->qp_last[i].rx_qp_mc_packets,
1381fdb1e851SJiawen Wu 			hw_stats->qp[i].rx_qp_mc_packets);
1382fdb1e851SJiawen Wu 		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
1383fdb1e851SJiawen Wu 			hw->qp_last[i].rx_qp_bc_packets,
1384fdb1e851SJiawen Wu 			hw_stats->qp[i].rx_qp_bc_packets);
1385fdb1e851SJiawen Wu 	}
1386fdb1e851SJiawen Wu 
1387fdb1e851SJiawen Wu 	for (i = 0; i < hw->nb_tx_queues; i++) {
1388fdb1e851SJiawen Wu 		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
1389fdb1e851SJiawen Wu 			hw->qp_last[i].tx_qp_packets,
1390fdb1e851SJiawen Wu 			hw_stats->qp[i].tx_qp_packets);
1391fdb1e851SJiawen Wu 		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
1392fdb1e851SJiawen Wu 			hw->qp_last[i].tx_qp_bytes,
1393fdb1e851SJiawen Wu 			hw_stats->qp[i].tx_qp_bytes);
1394fdb1e851SJiawen Wu 		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
1395fdb1e851SJiawen Wu 			hw->qp_last[i].tx_qp_mc_packets,
1396fdb1e851SJiawen Wu 			hw_stats->qp[i].tx_qp_mc_packets);
1397fdb1e851SJiawen Wu 		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
1398fdb1e851SJiawen Wu 			hw->qp_last[i].tx_qp_bc_packets,
1399fdb1e851SJiawen Wu 			hw_stats->qp[i].tx_qp_bc_packets);
1400fdb1e851SJiawen Wu 	}
1401fdb1e851SJiawen Wu 
1402fdb1e851SJiawen Wu 	/* PB Stats */
1403fdb1e851SJiawen Wu 	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
1404fdb1e851SJiawen Wu 	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
1405fdb1e851SJiawen Wu 	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
1406fdb1e851SJiawen Wu 	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
1407fdb1e851SJiawen Wu 	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
1408fdb1e851SJiawen Wu 	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);
1409fdb1e851SJiawen Wu 
1410fdb1e851SJiawen Wu 	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
1411fdb1e851SJiawen Wu 	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);
1412fdb1e851SJiawen Wu 
1413fdb1e851SJiawen Wu 	/* DMA Stats */
1414fdb1e851SJiawen Wu 	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
1415fee9350fSJiawen Wu 	hw_stats->tx_dma_drop += rd32(hw, NGBE_DMATXDROP);
1416fdb1e851SJiawen Wu 	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
1417fdb1e851SJiawen Wu 	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
1418fdb1e851SJiawen Wu 	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
1419fdb1e851SJiawen Wu 	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
1420fdb1e851SJiawen Wu 	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);
1421fdb1e851SJiawen Wu 
1422fdb1e851SJiawen Wu 	/* MAC Stats */
1423fdb1e851SJiawen Wu 	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
1424fdb1e851SJiawen Wu 	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
1425fdb1e851SJiawen Wu 	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);
1426fdb1e851SJiawen Wu 
1427fdb1e851SJiawen Wu 	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
1428fdb1e851SJiawen Wu 	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
1429fdb1e851SJiawen Wu 	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);
1430fdb1e851SJiawen Wu 
1431fdb1e851SJiawen Wu 	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
1432fdb1e851SJiawen Wu 	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);
1433fdb1e851SJiawen Wu 
1434fdb1e851SJiawen Wu 	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
1435fdb1e851SJiawen Wu 	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
1436fdb1e851SJiawen Wu 	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
1437fdb1e851SJiawen Wu 	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
1438fdb1e851SJiawen Wu 	hw_stats->rx_size_512_to_1023_packets +=
1439fdb1e851SJiawen Wu 			rd64(hw, NGBE_MACRX512TO1023L);
1440fdb1e851SJiawen Wu 	hw_stats->rx_size_1024_to_max_packets +=
1441fdb1e851SJiawen Wu 			rd64(hw, NGBE_MACRX1024TOMAXL);
1442fdb1e851SJiawen Wu 	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
1443fdb1e851SJiawen Wu 	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
1444fdb1e851SJiawen Wu 	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
1445fdb1e851SJiawen Wu 	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
1446fdb1e851SJiawen Wu 	hw_stats->tx_size_512_to_1023_packets +=
1447fdb1e851SJiawen Wu 			rd64(hw, NGBE_MACTX512TO1023L);
1448fdb1e851SJiawen Wu 	hw_stats->tx_size_1024_to_max_packets +=
1449fdb1e851SJiawen Wu 			rd64(hw, NGBE_MACTX1024TOMAXL);
1450fdb1e851SJiawen Wu 
1451fdb1e851SJiawen Wu 	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
1452fee9350fSJiawen Wu 	hw_stats->rx_oversize_cnt += rd32(hw, NGBE_MACRXOVERSIZE);
1453fdb1e851SJiawen Wu 	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);
1454fdb1e851SJiawen Wu 
1455fdb1e851SJiawen Wu 	/* MNG Stats */
1456fdb1e851SJiawen Wu 	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
1457fdb1e851SJiawen Wu 	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
1458fdb1e851SJiawen Wu 	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
1459fdb1e851SJiawen Wu 	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);
1460fdb1e851SJiawen Wu 
1461fdb1e851SJiawen Wu 	/* MACsec Stats */
1462fdb1e851SJiawen Wu 	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
1463fdb1e851SJiawen Wu 	hw_stats->tx_macsec_pkts_encrypted +=
1464fdb1e851SJiawen Wu 			rd32(hw, NGBE_LSECTX_ENCPKT);
1465fdb1e851SJiawen Wu 	hw_stats->tx_macsec_pkts_protected +=
1466fdb1e851SJiawen Wu 			rd32(hw, NGBE_LSECTX_PROTPKT);
1467fdb1e851SJiawen Wu 	hw_stats->tx_macsec_octets_encrypted +=
1468fdb1e851SJiawen Wu 			rd32(hw, NGBE_LSECTX_ENCOCT);
1469fdb1e851SJiawen Wu 	hw_stats->tx_macsec_octets_protected +=
1470fdb1e851SJiawen Wu 			rd32(hw, NGBE_LSECTX_PROTOCT);
1471fdb1e851SJiawen Wu 	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
1472fdb1e851SJiawen Wu 	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
1473fdb1e851SJiawen Wu 	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
1474fdb1e851SJiawen Wu 	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
1475fdb1e851SJiawen Wu 	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
1476fdb1e851SJiawen Wu 	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
1477fdb1e851SJiawen Wu 	hw_stats->rx_macsec_sc_pkts_unchecked +=
1478fdb1e851SJiawen Wu 			rd32(hw, NGBE_LSECRX_UNCHKPKT);
1479fdb1e851SJiawen Wu 	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
1480fdb1e851SJiawen Wu 	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
1481fdb1e851SJiawen Wu 	for (i = 0; i < 2; i++) {
1482fdb1e851SJiawen Wu 		hw_stats->rx_macsec_sa_pkts_ok +=
1483fdb1e851SJiawen Wu 			rd32(hw, NGBE_LSECRX_OKPKT(i));
1484fdb1e851SJiawen Wu 		hw_stats->rx_macsec_sa_pkts_invalid +=
1485fdb1e851SJiawen Wu 			rd32(hw, NGBE_LSECRX_INVPKT(i));
1486fdb1e851SJiawen Wu 		hw_stats->rx_macsec_sa_pkts_notvalid +=
1487fdb1e851SJiawen Wu 			rd32(hw, NGBE_LSECRX_BADPKT(i));
1488fdb1e851SJiawen Wu 	}
1489fdb1e851SJiawen Wu 	for (i = 0; i < 4; i++) {
1490fdb1e851SJiawen Wu 		hw_stats->rx_macsec_sa_pkts_unusedsa +=
1491fdb1e851SJiawen Wu 			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
1492fdb1e851SJiawen Wu 		hw_stats->rx_macsec_sa_pkts_notusingsa +=
1493fdb1e851SJiawen Wu 			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
1494fdb1e851SJiawen Wu 	}
1495fdb1e851SJiawen Wu 	hw_stats->rx_total_missed_packets =
1496fdb1e851SJiawen Wu 			hw_stats->rx_up_dropped;
1497fdb1e851SJiawen Wu }
1498fdb1e851SJiawen Wu 
1499fdb1e851SJiawen Wu static int
1500fdb1e851SJiawen Wu ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1501fdb1e851SJiawen Wu {
1502fdb1e851SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1503fdb1e851SJiawen Wu 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
1504fdb1e851SJiawen Wu 	struct ngbe_stat_mappings *stat_mappings =
1505fdb1e851SJiawen Wu 			NGBE_DEV_STAT_MAPPINGS(dev);
15063eba2f28SJiawen Wu 	struct ngbe_tx_queue *txq;
1507fdb1e851SJiawen Wu 	uint32_t i, j;
1508fdb1e851SJiawen Wu 
1509fdb1e851SJiawen Wu 	ngbe_read_stats_registers(hw, hw_stats);
1510fdb1e851SJiawen Wu 
1511fdb1e851SJiawen Wu 	if (stats == NULL)
1512fdb1e851SJiawen Wu 		return -EINVAL;
1513fdb1e851SJiawen Wu 
1514fdb1e851SJiawen Wu 	/* Fill out the rte_eth_stats statistics structure */
1515fdb1e851SJiawen Wu 	stats->ipackets = hw_stats->rx_packets;
1516fdb1e851SJiawen Wu 	stats->ibytes = hw_stats->rx_bytes;
1517fdb1e851SJiawen Wu 	stats->opackets = hw_stats->tx_packets;
1518fdb1e851SJiawen Wu 	stats->obytes = hw_stats->tx_bytes;
1519fdb1e851SJiawen Wu 
1520fdb1e851SJiawen Wu 	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
1521fdb1e851SJiawen Wu 	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
1522fdb1e851SJiawen Wu 	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
1523fdb1e851SJiawen Wu 	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
1524fdb1e851SJiawen Wu 	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
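	/*
	 * Per-queue basic stats: each 32-bit entry of stat_mappings->rqsm/tqsm
	 * packs NB_QMAP_FIELDS_PER_QSM_REG 8-bit fields, and field i selects
	 * which rte_eth_stats queue counter queue i is accumulated into
	 * (folded modulo RTE_ETHDEV_QUEUE_STAT_CNTRS when out of range).
	 */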
1525fdb1e851SJiawen Wu 	for (i = 0; i < NGBE_MAX_QP; i++) {
1526fdb1e851SJiawen Wu 		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
1527fdb1e851SJiawen Wu 		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
1528fdb1e851SJiawen Wu 		uint32_t q_map;
1529fdb1e851SJiawen Wu 
1530fdb1e851SJiawen Wu 		q_map = (stat_mappings->rqsm[n] >> offset)
1531fdb1e851SJiawen Wu 				& QMAP_FIELD_RESERVED_BITS_MASK;
1532fdb1e851SJiawen Wu 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1533fdb1e851SJiawen Wu 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1534fdb1e851SJiawen Wu 		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
1535fdb1e851SJiawen Wu 		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
1536fdb1e851SJiawen Wu 
1537fdb1e851SJiawen Wu 		q_map = (stat_mappings->tqsm[n] >> offset)
1538fdb1e851SJiawen Wu 				& QMAP_FIELD_RESERVED_BITS_MASK;
1539fdb1e851SJiawen Wu 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
1540fdb1e851SJiawen Wu 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
1541fdb1e851SJiawen Wu 		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
1542fdb1e851SJiawen Wu 		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
1543fdb1e851SJiawen Wu 	}
1544fdb1e851SJiawen Wu 
1545fdb1e851SJiawen Wu 	/* Rx Errors */
1546fdb1e851SJiawen Wu 	stats->imissed  = hw_stats->rx_total_missed_packets +
1547fdb1e851SJiawen Wu 			  hw_stats->rx_dma_drop;
1548fdb1e851SJiawen Wu 	stats->ierrors  = hw_stats->rx_crc_errors +
1549fdb1e851SJiawen Wu 			  hw_stats->rx_mac_short_packet_dropped +
1550fdb1e851SJiawen Wu 			  hw_stats->rx_length_errors +
1551fdb1e851SJiawen Wu 			  hw_stats->rx_undersize_errors +
1552fee9350fSJiawen Wu 			  hw_stats->rdb_drp_cnt +
1553fdb1e851SJiawen Wu 			  hw_stats->rx_illegal_byte_errors +
1554fdb1e851SJiawen Wu 			  hw_stats->rx_error_bytes +
1555fdb1e851SJiawen Wu 			  hw_stats->rx_fragment_errors;
1556fdb1e851SJiawen Wu 
1557fdb1e851SJiawen Wu 	/* Tx Errors */
1558fdb1e851SJiawen Wu 	stats->oerrors  = 0;
15593eba2f28SJiawen Wu 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
15603eba2f28SJiawen Wu 		txq = dev->data->tx_queues[i];
15613eba2f28SJiawen Wu 		stats->oerrors += txq->desc_error;
15623eba2f28SJiawen Wu 	}
15633eba2f28SJiawen Wu 
1564fdb1e851SJiawen Wu 	return 0;
1565fdb1e851SJiawen Wu }
1566fdb1e851SJiawen Wu 
1567fdb1e851SJiawen Wu static int
1568fdb1e851SJiawen Wu ngbe_dev_stats_reset(struct rte_eth_dev *dev)
1569fdb1e851SJiawen Wu {
1570fdb1e851SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1571fdb1e851SJiawen Wu 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
15723eba2f28SJiawen Wu 	struct ngbe_tx_queue *txq;
15733eba2f28SJiawen Wu 	uint32_t i;
15743eba2f28SJiawen Wu 
15753eba2f28SJiawen Wu 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
15763eba2f28SJiawen Wu 		txq = dev->data->tx_queues[i];
15773eba2f28SJiawen Wu 		txq->desc_error = 0;
15783eba2f28SJiawen Wu 	}
1579fdb1e851SJiawen Wu 
1580fdb1e851SJiawen Wu 	/* HW registers are cleared on read */
1581fdb1e851SJiawen Wu 	hw->offset_loaded = 0;
1582fdb1e851SJiawen Wu 	ngbe_dev_stats_get(dev, NULL);
1583fdb1e851SJiawen Wu 	hw->offset_loaded = 1;
1584fdb1e851SJiawen Wu 
1585fdb1e851SJiawen Wu 	/* Reset software totals */
1586fdb1e851SJiawen Wu 	memset(hw_stats, 0, sizeof(*hw_stats));
1587fdb1e851SJiawen Wu 
1588fdb1e851SJiawen Wu 	return 0;
1589fdb1e851SJiawen Wu }
1590fdb1e851SJiawen Wu 
15918b433d04SJiawen Wu /* This function calculates the number of xstats based on the current config */
15928b433d04SJiawen Wu static unsigned
15938b433d04SJiawen Wu ngbe_xstats_calc_num(struct rte_eth_dev *dev)
15948b433d04SJiawen Wu {
15958b433d04SJiawen Wu 	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
15968b433d04SJiawen Wu 	return NGBE_NB_HW_STATS +
15978b433d04SJiawen Wu 	       NGBE_NB_QP_STATS * nb_queues;
15988b433d04SJiawen Wu }
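/*
 * xstats id layout assumed by the helpers below: ids 0..NGBE_NB_HW_STATS-1
 * map to rte_ngbe_stats_strings, followed by NGBE_NB_QP_STATS entries per
 * queue pair taken from rte_ngbe_qp_strings.
 */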
15998b433d04SJiawen Wu 
16008b433d04SJiawen Wu static inline int
16018b433d04SJiawen Wu ngbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
16028b433d04SJiawen Wu {
16038b433d04SJiawen Wu 	int nb, st;
16048b433d04SJiawen Wu 
16058b433d04SJiawen Wu 	/* Extended stats from ngbe_hw_stats */
16068b433d04SJiawen Wu 	if (id < NGBE_NB_HW_STATS) {
16078b433d04SJiawen Wu 		snprintf(name, size, "[hw]%s",
16088b433d04SJiawen Wu 			rte_ngbe_stats_strings[id].name);
16098b433d04SJiawen Wu 		return 0;
16108b433d04SJiawen Wu 	}
16118b433d04SJiawen Wu 	id -= NGBE_NB_HW_STATS;
16128b433d04SJiawen Wu 
16138b433d04SJiawen Wu 	/* Queue Stats */
16148b433d04SJiawen Wu 	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
16158b433d04SJiawen Wu 		nb = id / NGBE_NB_QP_STATS;
16168b433d04SJiawen Wu 		st = id % NGBE_NB_QP_STATS;
16178b433d04SJiawen Wu 		snprintf(name, size, "[q%u]%s", nb,
16188b433d04SJiawen Wu 			rte_ngbe_qp_strings[st].name);
16198b433d04SJiawen Wu 		return 0;
16208b433d04SJiawen Wu 	}
16218b433d04SJiawen Wu 	id -= NGBE_NB_QP_STATS * NGBE_MAX_QP;
16228b433d04SJiawen Wu 
16238b433d04SJiawen Wu 	return -(int)(id + 1);
16248b433d04SJiawen Wu }
16258b433d04SJiawen Wu 
16268b433d04SJiawen Wu static inline int
16278b433d04SJiawen Wu ngbe_get_offset_by_id(uint32_t id, uint32_t *offset)
16288b433d04SJiawen Wu {
16298b433d04SJiawen Wu 	int nb, st;
16308b433d04SJiawen Wu 
16318b433d04SJiawen Wu 	/* Extended stats from ngbe_hw_stats */
16328b433d04SJiawen Wu 	if (id < NGBE_NB_HW_STATS) {
16338b433d04SJiawen Wu 		*offset = rte_ngbe_stats_strings[id].offset;
16348b433d04SJiawen Wu 		return 0;
16358b433d04SJiawen Wu 	}
16368b433d04SJiawen Wu 	id -= NGBE_NB_HW_STATS;
16378b433d04SJiawen Wu 
16388b433d04SJiawen Wu 	/* Queue Stats */
16398b433d04SJiawen Wu 	if (id < NGBE_NB_QP_STATS * NGBE_MAX_QP) {
16408b433d04SJiawen Wu 		nb = id / NGBE_NB_QP_STATS;
16418b433d04SJiawen Wu 		st = id % NGBE_NB_QP_STATS;
16428b433d04SJiawen Wu 		*offset = rte_ngbe_qp_strings[st].offset +
16438b433d04SJiawen Wu 			nb * (NGBE_NB_QP_STATS * sizeof(uint64_t));
16448b433d04SJiawen Wu 		return 0;
16458b433d04SJiawen Wu 	}
16468b433d04SJiawen Wu 
16478b433d04SJiawen Wu 	return -1;
16488b433d04SJiawen Wu }
16498b433d04SJiawen Wu 
16508b433d04SJiawen Wu static int ngbe_dev_xstats_get_names(struct rte_eth_dev *dev,
16518b433d04SJiawen Wu 	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
16528b433d04SJiawen Wu {
16538b433d04SJiawen Wu 	unsigned int i, count;
16548b433d04SJiawen Wu 
16558b433d04SJiawen Wu 	count = ngbe_xstats_calc_num(dev);
16568b433d04SJiawen Wu 	if (xstats_names == NULL)
16578b433d04SJiawen Wu 		return count;
16588b433d04SJiawen Wu 
16598b433d04SJiawen Wu 	/* Note: limit >= cnt_stats checked upstream
16608b433d04SJiawen Wu 	 * in rte_eth_xstats_names()
16618b433d04SJiawen Wu 	 */
16628b433d04SJiawen Wu 	limit = min(limit, count);
16638b433d04SJiawen Wu 
16648b433d04SJiawen Wu 	/* Extended stats from ngbe_hw_stats */
16658b433d04SJiawen Wu 	for (i = 0; i < limit; i++) {
16668b433d04SJiawen Wu 		if (ngbe_get_name_by_id(i, xstats_names[i].name,
16678b433d04SJiawen Wu 			sizeof(xstats_names[i].name))) {
16688b433d04SJiawen Wu 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
16698b433d04SJiawen Wu 			break;
16708b433d04SJiawen Wu 		}
16718b433d04SJiawen Wu 	}
16728b433d04SJiawen Wu 
16738b433d04SJiawen Wu 	return i;
16748b433d04SJiawen Wu }
16758b433d04SJiawen Wu 
16768b433d04SJiawen Wu static int ngbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
16778b433d04SJiawen Wu 	const uint64_t *ids,
16788b433d04SJiawen Wu 	struct rte_eth_xstat_name *xstats_names,
16798b433d04SJiawen Wu 	unsigned int limit)
16808b433d04SJiawen Wu {
16818b433d04SJiawen Wu 	unsigned int i;
16828b433d04SJiawen Wu 
16838b433d04SJiawen Wu 	if (ids == NULL)
16848b433d04SJiawen Wu 		return ngbe_dev_xstats_get_names(dev, xstats_names, limit);
16858b433d04SJiawen Wu 
16868b433d04SJiawen Wu 	for (i = 0; i < limit; i++) {
16878b433d04SJiawen Wu 		if (ngbe_get_name_by_id(ids[i], xstats_names[i].name,
16888b433d04SJiawen Wu 				sizeof(xstats_names[i].name))) {
16898b433d04SJiawen Wu 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
16908b433d04SJiawen Wu 			return -1;
16918b433d04SJiawen Wu 		}
16928b433d04SJiawen Wu 	}
16938b433d04SJiawen Wu 
16948b433d04SJiawen Wu 	return i;
16958b433d04SJiawen Wu }
16968b433d04SJiawen Wu 
16978b433d04SJiawen Wu static int
16988b433d04SJiawen Wu ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
16998b433d04SJiawen Wu 					 unsigned int limit)
17008b433d04SJiawen Wu {
17018b433d04SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
17028b433d04SJiawen Wu 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
17038b433d04SJiawen Wu 	unsigned int i, count;
17048b433d04SJiawen Wu 
17058b433d04SJiawen Wu 	ngbe_read_stats_registers(hw, hw_stats);
17068b433d04SJiawen Wu 
17078b433d04SJiawen Wu 	/* If this is a reset, xstats is NULL and we have already cleared
17088b433d04SJiawen Wu 	 * the registers by reading them.
17098b433d04SJiawen Wu 	 */
17108b433d04SJiawen Wu 	count = ngbe_xstats_calc_num(dev);
17118b433d04SJiawen Wu 	if (xstats == NULL)
17128b433d04SJiawen Wu 		return count;
17138b433d04SJiawen Wu 
17148b433d04SJiawen Wu 	limit = min(limit, ngbe_xstats_calc_num(dev));
17158b433d04SJiawen Wu 
17168b433d04SJiawen Wu 	/* Extended stats from ngbe_hw_stats */
17178b433d04SJiawen Wu 	for (i = 0; i < limit; i++) {
17188b433d04SJiawen Wu 		uint32_t offset = 0;
17198b433d04SJiawen Wu 
17208b433d04SJiawen Wu 		if (ngbe_get_offset_by_id(i, &offset)) {
17218b433d04SJiawen Wu 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
17228b433d04SJiawen Wu 			break;
17238b433d04SJiawen Wu 		}
17248b433d04SJiawen Wu 		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
17258b433d04SJiawen Wu 		xstats[i].id = i;
17268b433d04SJiawen Wu 	}
17278b433d04SJiawen Wu 
17288b433d04SJiawen Wu 	return i;
17298b433d04SJiawen Wu }
17308b433d04SJiawen Wu 
17318b433d04SJiawen Wu static int
17328b433d04SJiawen Wu ngbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
17338b433d04SJiawen Wu 					 unsigned int limit)
17348b433d04SJiawen Wu {
17358b433d04SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
17368b433d04SJiawen Wu 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
17378b433d04SJiawen Wu 	unsigned int i, count;
17388b433d04SJiawen Wu 
17398b433d04SJiawen Wu 	ngbe_read_stats_registers(hw, hw_stats);
17408b433d04SJiawen Wu 
17418b433d04SJiawen Wu 	/* If this is a reset, xstats is NULL and we have already cleared
17428b433d04SJiawen Wu 	 * the registers by reading them.
17438b433d04SJiawen Wu 	 */
17448b433d04SJiawen Wu 	count = ngbe_xstats_calc_num(dev);
17458b433d04SJiawen Wu 	if (values == NULL)
17468b433d04SJiawen Wu 		return count;
17478b433d04SJiawen Wu 
17488b433d04SJiawen Wu 	limit = min(limit, ngbe_xstats_calc_num(dev));
17498b433d04SJiawen Wu 
17508b433d04SJiawen Wu 	/* Extended stats from ngbe_hw_stats */
17518b433d04SJiawen Wu 	for (i = 0; i < limit; i++) {
17528b433d04SJiawen Wu 		uint32_t offset;
17538b433d04SJiawen Wu 
17548b433d04SJiawen Wu 		if (ngbe_get_offset_by_id(i, &offset)) {
17558b433d04SJiawen Wu 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
17568b433d04SJiawen Wu 			break;
17578b433d04SJiawen Wu 		}
17588b433d04SJiawen Wu 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
17598b433d04SJiawen Wu 	}
17608b433d04SJiawen Wu 
17618b433d04SJiawen Wu 	return i;
17628b433d04SJiawen Wu }
17638b433d04SJiawen Wu 
17648b433d04SJiawen Wu static int
17658b433d04SJiawen Wu ngbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
17668b433d04SJiawen Wu 		uint64_t *values, unsigned int limit)
17678b433d04SJiawen Wu {
17688b433d04SJiawen Wu 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
17698b433d04SJiawen Wu 	unsigned int i;
17708b433d04SJiawen Wu 
17718b433d04SJiawen Wu 	if (ids == NULL)
17728b433d04SJiawen Wu 		return ngbe_dev_xstats_get_(dev, values, limit);
17738b433d04SJiawen Wu 
17748b433d04SJiawen Wu 	for (i = 0; i < limit; i++) {
17758b433d04SJiawen Wu 		uint32_t offset;
17768b433d04SJiawen Wu 
17778b433d04SJiawen Wu 		if (ngbe_get_offset_by_id(ids[i], &offset)) {
17788b433d04SJiawen Wu 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
17798b433d04SJiawen Wu 			break;
17808b433d04SJiawen Wu 		}
17818b433d04SJiawen Wu 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
17828b433d04SJiawen Wu 	}
17838b433d04SJiawen Wu 
17848b433d04SJiawen Wu 	return i;
17858b433d04SJiawen Wu }
17868b433d04SJiawen Wu 
17878b433d04SJiawen Wu static int
17888b433d04SJiawen Wu ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
17898b433d04SJiawen Wu {
17908b433d04SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
17918b433d04SJiawen Wu 	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
17928b433d04SJiawen Wu 
17938b433d04SJiawen Wu 	/* HW registers are cleared on read */
17948b433d04SJiawen Wu 	hw->offset_loaded = 0;
17958b433d04SJiawen Wu 	ngbe_read_stats_registers(hw, hw_stats);
17968b433d04SJiawen Wu 	hw->offset_loaded = 1;
17978b433d04SJiawen Wu 
17988b433d04SJiawen Wu 	/* Reset software totals */
17998b433d04SJiawen Wu 	memset(hw_stats, 0, sizeof(*hw_stats));
18008b433d04SJiawen Wu 
18018b433d04SJiawen Wu 	return 0;
18028b433d04SJiawen Wu }
18038b433d04SJiawen Wu 
1804b9246b8fSJiawen Wu static int
1805506abd4aSJiawen Wu ngbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1806506abd4aSJiawen Wu {
1807506abd4aSJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1808506abd4aSJiawen Wu 	int ret;
1809506abd4aSJiawen Wu 
1810506abd4aSJiawen Wu 	ret = snprintf(fw_version, fw_size, "0x%08x", hw->eeprom_id);
1811506abd4aSJiawen Wu 
1812506abd4aSJiawen Wu 	if (ret < 0)
1813506abd4aSJiawen Wu 		return -EINVAL;
1814506abd4aSJiawen Wu 
1815506abd4aSJiawen Wu 	ret += 1; /* add the size of '\0' */
1816506abd4aSJiawen Wu 	if (fw_size < (size_t)ret)
1817506abd4aSJiawen Wu 		return ret;
1818506abd4aSJiawen Wu 
1819506abd4aSJiawen Wu 	return 0;
1820506abd4aSJiawen Wu }
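/*
 * ngbe_fw_version_get() follows the ethdev fw_version_get convention:
 * 0 on success, or the number of bytes (including the terminating '\0')
 * needed when the caller's buffer is too small. Illustrative caller-side
 * sketch (port_id is assumed to be a valid port id):
 *
 *     char fw[16];
 *     int n = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *     if (n > 0)
 *         ... retry with a buffer of at least n bytes ...
 */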
1821506abd4aSJiawen Wu 
1822506abd4aSJiawen Wu static int
1823b9246b8fSJiawen Wu ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1824b9246b8fSJiawen Wu {
1825dee93977SJiawen Wu 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
182643b7e5eaSJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
182743b7e5eaSJiawen Wu 
182843b7e5eaSJiawen Wu 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1829a58e7c31SJiawen Wu 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
183062fc35e6SJiawen Wu 	dev_info->min_rx_bufsize = 1024;
1831df2075ebSJiawen Wu 	dev_info->max_rx_pktlen = NGBE_MAX_MTU + NGBE_ETH_OVERHEAD;
1832df2075ebSJiawen Wu 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
1833df2075ebSJiawen Wu 	dev_info->max_mtu = NGBE_MAX_MTU;
1834dee93977SJiawen Wu 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1835dee93977SJiawen Wu 	dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC;
1836dee93977SJiawen Wu 	dev_info->max_vfs = pci_dev->max_vfs;
183759b46438SJiawen Wu 	dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev);
183879f3128dSJiawen Wu 	dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
183979f3128dSJiawen Wu 				     dev_info->rx_queue_offload_capa);
18409f320614SJiawen Wu 	dev_info->tx_queue_offload_capa = 0;
18419f320614SJiawen Wu 	dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
184243b7e5eaSJiawen Wu 
184343b7e5eaSJiawen Wu 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
184443b7e5eaSJiawen Wu 		.rx_thresh = {
184543b7e5eaSJiawen Wu 			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
184643b7e5eaSJiawen Wu 			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
184743b7e5eaSJiawen Wu 			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
184843b7e5eaSJiawen Wu 		},
184943b7e5eaSJiawen Wu 		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
185043b7e5eaSJiawen Wu 		.rx_drop_en = 0,
185143b7e5eaSJiawen Wu 		.offloads = 0,
185243b7e5eaSJiawen Wu 	};
185343b7e5eaSJiawen Wu 
1854a58e7c31SJiawen Wu 	dev_info->default_txconf = (struct rte_eth_txconf) {
1855a58e7c31SJiawen Wu 		.tx_thresh = {
1856a58e7c31SJiawen Wu 			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
1857a58e7c31SJiawen Wu 			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
1858a58e7c31SJiawen Wu 			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
1859a58e7c31SJiawen Wu 		},
1860a58e7c31SJiawen Wu 		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
1861a58e7c31SJiawen Wu 		.offloads = 0,
1862a58e7c31SJiawen Wu 	};
1863a58e7c31SJiawen Wu 
186443b7e5eaSJiawen Wu 	dev_info->rx_desc_lim = rx_desc_lim;
1865a58e7c31SJiawen Wu 	dev_info->tx_desc_lim = tx_desc_lim;
1866b9246b8fSJiawen Wu 
18670779d7f6SJiawen Wu 	dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
18680779d7f6SJiawen Wu 	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
18690779d7f6SJiawen Wu 	dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;
18700779d7f6SJiawen Wu 
1871295968d1SFerruh Yigit 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
1872295968d1SFerruh Yigit 				RTE_ETH_LINK_SPEED_10M;
1873b9246b8fSJiawen Wu 
187443b7e5eaSJiawen Wu 	/* Driver-preferred Rx/Tx parameters */
187562fc35e6SJiawen Wu 	dev_info->default_rxportconf.burst_size = 32;
1876001c7823SJiawen Wu 	dev_info->default_txportconf.burst_size = 32;
187743b7e5eaSJiawen Wu 	dev_info->default_rxportconf.nb_queues = 1;
1878a58e7c31SJiawen Wu 	dev_info->default_txportconf.nb_queues = 1;
187943b7e5eaSJiawen Wu 	dev_info->default_rxportconf.ring_size = 256;
1880a58e7c31SJiawen Wu 	dev_info->default_txportconf.ring_size = 256;
188143b7e5eaSJiawen Wu 
1882b9246b8fSJiawen Wu 	return 0;
1883b9246b8fSJiawen Wu }
1884b9246b8fSJiawen Wu 
1885f6aef1daSJiawen Wu const uint32_t *
1886ba6a168aSSivaramakrishnan Venkat ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
1887f6aef1daSJiawen Wu {
188879f3128dSJiawen Wu 	if (dev->rx_pkt_burst == ngbe_recv_pkts ||
1889e94c20c3SJiawen Wu #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)
1890e94c20c3SJiawen Wu 	    dev->rx_pkt_burst == ngbe_recv_pkts_vec ||
1891e94c20c3SJiawen Wu 	    dev->rx_pkt_burst == ngbe_recv_scattered_pkts_vec ||
1892e94c20c3SJiawen Wu #endif
189379f3128dSJiawen Wu 	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
189479f3128dSJiawen Wu 	    dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
189579f3128dSJiawen Wu 	    dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
1896ba6a168aSSivaramakrishnan Venkat 		return ngbe_get_supported_ptypes(no_of_elements);
1897f6aef1daSJiawen Wu 
1898f6aef1daSJiawen Wu 	return NULL;
1899f6aef1daSJiawen Wu }
1900f6aef1daSJiawen Wu 
1901f3ff9f28SJiawen Wu static void
1902f3ff9f28SJiawen Wu ngbe_dev_overheat(struct rte_eth_dev *dev)
1903f3ff9f28SJiawen Wu {
1904f3ff9f28SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1905f3ff9f28SJiawen Wu 	s32 temp_state;
1906f3ff9f28SJiawen Wu 
1907f3ff9f28SJiawen Wu 	temp_state = hw->mac.check_overtemp(hw);
1908f3ff9f28SJiawen Wu 	if (!temp_state)
1909f3ff9f28SJiawen Wu 		return;
1910f3ff9f28SJiawen Wu 
1911f3ff9f28SJiawen Wu 	if (temp_state == NGBE_ERR_UNDERTEMP) {
1912f3ff9f28SJiawen Wu 		PMD_DRV_LOG(CRIT, "Network adapter has been started again, "
1913f3ff9f28SJiawen Wu 			"since the temperature has returned to a normal state.");
1914f3ff9f28SJiawen Wu 		wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, NGBE_PBRXCTL_ENA);
1915f3ff9f28SJiawen Wu 		ngbe_dev_set_link_up(dev);
1916f3ff9f28SJiawen Wu 	} else if (temp_state == NGBE_ERR_OVERTEMP) {
1917f3ff9f28SJiawen Wu 		PMD_DRV_LOG(CRIT, "Network adapter has been stopped because it has overheated.");
1918f3ff9f28SJiawen Wu 		wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0);
1919f3ff9f28SJiawen Wu 		ngbe_dev_set_link_down(dev);
1920f3ff9f28SJiawen Wu 	}
1921f3ff9f28SJiawen Wu }
1922f3ff9f28SJiawen Wu 
1923b9246b8fSJiawen Wu /* return 0 means link status changed, -1 means not changed */
1924b9246b8fSJiawen Wu int
1925b9246b8fSJiawen Wu ngbe_dev_link_update_share(struct rte_eth_dev *dev,
1926b9246b8fSJiawen Wu 			    int wait_to_complete)
1927b9246b8fSJiawen Wu {
1928b9246b8fSJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
1929b9246b8fSJiawen Wu 	struct rte_eth_link link;
1930b9246b8fSJiawen Wu 	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
1931b9246b8fSJiawen Wu 	u32 lan_speed = 0;
1932b9246b8fSJiawen Wu 	bool link_up;
1933b9246b8fSJiawen Wu 	int err;
1934b9246b8fSJiawen Wu 	int wait = 1;
1935b8d52e10SJiawen Wu 	u32 reg;
1936b9246b8fSJiawen Wu 
1937b9246b8fSJiawen Wu 	memset(&link, 0, sizeof(link));
1938295968d1SFerruh Yigit 	link.link_status = RTE_ETH_LINK_DOWN;
1939295968d1SFerruh Yigit 	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1940295968d1SFerruh Yigit 	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1941b9246b8fSJiawen Wu 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1942295968d1SFerruh Yigit 			~RTE_ETH_LINK_SPEED_AUTONEG);
1943b9246b8fSJiawen Wu 
1944b9246b8fSJiawen Wu 	hw->mac.get_link_status = true;
1945b9246b8fSJiawen Wu 
1946b9246b8fSJiawen Wu 	/* check if it needs to wait to complete, if lsc interrupt is enabled */
1947b9246b8fSJiawen Wu 	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1948b9246b8fSJiawen Wu 		wait = 0;
1949b9246b8fSJiawen Wu 
1950b9246b8fSJiawen Wu 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
1951b9246b8fSJiawen Wu 	if (err != 0) {
1952295968d1SFerruh Yigit 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1953295968d1SFerruh Yigit 		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1954b9246b8fSJiawen Wu 		return rte_eth_linkstatus_set(dev, &link);
1955b9246b8fSJiawen Wu 	}
1956b9246b8fSJiawen Wu 
1957708ebe7dSJiawen Wu 	if (!link_up)
1958b9246b8fSJiawen Wu 		return rte_eth_linkstatus_set(dev, &link);
1959b9246b8fSJiawen Wu 
1960295968d1SFerruh Yigit 	link.link_status = RTE_ETH_LINK_UP;
1961295968d1SFerruh Yigit 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1962b9246b8fSJiawen Wu 
1963b9246b8fSJiawen Wu 	switch (link_speed) {
1964b9246b8fSJiawen Wu 	default:
1965b9246b8fSJiawen Wu 	case NGBE_LINK_SPEED_UNKNOWN:
1966295968d1SFerruh Yigit 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1967b9246b8fSJiawen Wu 		break;
1968b9246b8fSJiawen Wu 
1969b9246b8fSJiawen Wu 	case NGBE_LINK_SPEED_10M_FULL:
1970295968d1SFerruh Yigit 		link.link_speed = RTE_ETH_SPEED_NUM_10M;
1971b9246b8fSJiawen Wu 		lan_speed = 0;
1972b9246b8fSJiawen Wu 		break;
1973b9246b8fSJiawen Wu 
1974b9246b8fSJiawen Wu 	case NGBE_LINK_SPEED_100M_FULL:
1975295968d1SFerruh Yigit 		link.link_speed = RTE_ETH_SPEED_NUM_100M;
1976b9246b8fSJiawen Wu 		lan_speed = 1;
1977b9246b8fSJiawen Wu 		break;
1978b9246b8fSJiawen Wu 
1979b9246b8fSJiawen Wu 	case NGBE_LINK_SPEED_1GB_FULL:
1980295968d1SFerruh Yigit 		link.link_speed = RTE_ETH_SPEED_NUM_1G;
1981b9246b8fSJiawen Wu 		lan_speed = 2;
1982b9246b8fSJiawen Wu 		break;
1983b9246b8fSJiawen Wu 	}
1984b9246b8fSJiawen Wu 
1985b9246b8fSJiawen Wu 	if (hw->is_pf) {
1986b9246b8fSJiawen Wu 		wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
1987b9246b8fSJiawen Wu 		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
1988b9246b8fSJiawen Wu 				NGBE_LINK_SPEED_100M_FULL |
1989b9246b8fSJiawen Wu 				NGBE_LINK_SPEED_10M_FULL)) {
1990b9246b8fSJiawen Wu 			wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
1991b9246b8fSJiawen Wu 				NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
1992b9246b8fSJiawen Wu 		}
1993b8d52e10SJiawen Wu 		/* Reconfigure MAC Rx */
1994b8d52e10SJiawen Wu 		reg = rd32(hw, NGBE_MACRXCFG);
1995b8d52e10SJiawen Wu 		wr32(hw, NGBE_MACRXCFG, reg);
1996a7c5f95eSJiawen Wu 		wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_PROMISC,
1997a7c5f95eSJiawen Wu 			NGBE_MACRXFLT_PROMISC);
1998b8d52e10SJiawen Wu 		reg = rd32(hw, NGBE_MAC_WDG_TIMEOUT);
1999b8d52e10SJiawen Wu 		wr32(hw, NGBE_MAC_WDG_TIMEOUT, reg);
2000b9246b8fSJiawen Wu 	}
2001b9246b8fSJiawen Wu 
2002b9246b8fSJiawen Wu 	return rte_eth_linkstatus_set(dev, &link);
2003b9246b8fSJiawen Wu }
2004b9246b8fSJiawen Wu 
2005b9246b8fSJiawen Wu static int
2006b9246b8fSJiawen Wu ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2007b9246b8fSJiawen Wu {
2008b9246b8fSJiawen Wu 	return ngbe_dev_link_update_share(dev, wait_to_complete);
2009b9246b8fSJiawen Wu }
2010b9246b8fSJiawen Wu 
2011b83372a0SJiawen Wu static int
2012b83372a0SJiawen Wu ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2013b83372a0SJiawen Wu {
2014b83372a0SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2015b83372a0SJiawen Wu 	uint32_t fctrl;
2016b83372a0SJiawen Wu 
2017b83372a0SJiawen Wu 	fctrl = rd32(hw, NGBE_PSRCTL);
2018b83372a0SJiawen Wu 	fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
2019b83372a0SJiawen Wu 	wr32(hw, NGBE_PSRCTL, fctrl);
2020b83372a0SJiawen Wu 
2021b83372a0SJiawen Wu 	return 0;
2022b83372a0SJiawen Wu }
2023b83372a0SJiawen Wu 
2024b83372a0SJiawen Wu static int
2025b83372a0SJiawen Wu ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2026b83372a0SJiawen Wu {
2027b83372a0SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2028b83372a0SJiawen Wu 	uint32_t fctrl;
2029b83372a0SJiawen Wu 
2030b83372a0SJiawen Wu 	fctrl = rd32(hw, NGBE_PSRCTL);
2031b83372a0SJiawen Wu 	fctrl &= (~NGBE_PSRCTL_UCP);
2032b83372a0SJiawen Wu 	if (dev->data->all_multicast == 1)
2033b83372a0SJiawen Wu 		fctrl |= NGBE_PSRCTL_MCP;
2034b83372a0SJiawen Wu 	else
2035b83372a0SJiawen Wu 		fctrl &= (~NGBE_PSRCTL_MCP);
2036b83372a0SJiawen Wu 	wr32(hw, NGBE_PSRCTL, fctrl);
2037b83372a0SJiawen Wu 
2038b83372a0SJiawen Wu 	return 0;
2039b83372a0SJiawen Wu }
2040b83372a0SJiawen Wu 
2041b83372a0SJiawen Wu static int
2042b83372a0SJiawen Wu ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2043b83372a0SJiawen Wu {
2044b83372a0SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2045b83372a0SJiawen Wu 	uint32_t fctrl;
2046b83372a0SJiawen Wu 
2047b83372a0SJiawen Wu 	fctrl = rd32(hw, NGBE_PSRCTL);
2048b83372a0SJiawen Wu 	fctrl |= NGBE_PSRCTL_MCP;
2049b83372a0SJiawen Wu 	wr32(hw, NGBE_PSRCTL, fctrl);
2050b83372a0SJiawen Wu 
2051b83372a0SJiawen Wu 	return 0;
2052b83372a0SJiawen Wu }
2053b83372a0SJiawen Wu 
2054b83372a0SJiawen Wu static int
2055b83372a0SJiawen Wu ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2056b83372a0SJiawen Wu {
2057b83372a0SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2058b83372a0SJiawen Wu 	uint32_t fctrl;
2059b83372a0SJiawen Wu 
2060b83372a0SJiawen Wu 	if (dev->data->promiscuous == 1)
2061b83372a0SJiawen Wu 		return 0; /* must remain in all_multicast mode */
2062b83372a0SJiawen Wu 
2063b83372a0SJiawen Wu 	fctrl = rd32(hw, NGBE_PSRCTL);
2064b83372a0SJiawen Wu 	fctrl &= (~NGBE_PSRCTL_MCP);
2065b83372a0SJiawen Wu 	wr32(hw, NGBE_PSRCTL, fctrl);
2066b83372a0SJiawen Wu 
2067b83372a0SJiawen Wu 	return 0;
2068b83372a0SJiawen Wu }
2069b83372a0SJiawen Wu 
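/*
 * The four filter-mode callbacks above only toggle the unicast/multicast
 * promiscuous bits (NGBE_PSRCTL_UCP/NGBE_PSRCTL_MCP) in PSRCTL; disabling
 * all-multicast is skipped while promiscuous mode is on, since promiscuous
 * mode already implies accepting all multicast frames.
 */
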
20703518df57SJiawen Wu /**
20713518df57SJiawen Wu  * It enables or disables the link status change (LSC) interrupt sources.
20723518df57SJiawen Wu  * It is called only once, during NIC initialization.
20733518df57SJiawen Wu  *
20743518df57SJiawen Wu  * @param dev
20753518df57SJiawen Wu  *  Pointer to struct rte_eth_dev.
20763518df57SJiawen Wu  * @param on
20773518df57SJiawen Wu  *  Enable or Disable.
20783518df57SJiawen Wu  *
20793518df57SJiawen Wu  * @return
20803518df57SJiawen Wu  *  - On success, zero.
20813518df57SJiawen Wu  *  - On failure, a negative value.
20823518df57SJiawen Wu  */
20833518df57SJiawen Wu static int
20843518df57SJiawen Wu ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
20853518df57SJiawen Wu {
20863518df57SJiawen Wu 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
20873518df57SJiawen Wu 
20883518df57SJiawen Wu 	ngbe_dev_link_status_print(dev);
20893518df57SJiawen Wu 	if (on != 0) {
20903518df57SJiawen Wu 		intr->mask_misc |= NGBE_ICRMISC_PHY;
20913518df57SJiawen Wu 		intr->mask_misc |= NGBE_ICRMISC_GPIO;
20923518df57SJiawen Wu 	} else {
20933518df57SJiawen Wu 		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
20943518df57SJiawen Wu 		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
20953518df57SJiawen Wu 	}
20963518df57SJiawen Wu 
20973518df57SJiawen Wu 	return 0;
20983518df57SJiawen Wu }
20993518df57SJiawen Wu 
21003518df57SJiawen Wu /**
21013518df57SJiawen Wu  * It clears the interrupt causes and enables the interrupt.
21023518df57SJiawen Wu  * It is called only once, during NIC initialization.
21033518df57SJiawen Wu  *
21043518df57SJiawen Wu  * @param dev
21053518df57SJiawen Wu  *  Pointer to struct rte_eth_dev.
21063518df57SJiawen Wu  *
21073518df57SJiawen Wu  * @return
21083518df57SJiawen Wu  *  - On success, zero.
21093518df57SJiawen Wu  *  - On failure, a negative value.
21103518df57SJiawen Wu  */
21113518df57SJiawen Wu static int
21123518df57SJiawen Wu ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
21133518df57SJiawen Wu {
21143518df57SJiawen Wu 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
21153518df57SJiawen Wu 	u64 mask;
21163518df57SJiawen Wu 
21173518df57SJiawen Wu 	mask = NGBE_ICR_MASK;
21183518df57SJiawen Wu 	mask &= (1ULL << NGBE_MISC_VEC_ID);
21193518df57SJiawen Wu 	intr->mask |= mask;
21203518df57SJiawen Wu 	intr->mask_misc |= NGBE_ICRMISC_GPIO;
21213518df57SJiawen Wu 
21223518df57SJiawen Wu 	return 0;
21233518df57SJiawen Wu }
21243518df57SJiawen Wu 
21253518df57SJiawen Wu /**
21263518df57SJiawen Wu  * It clears the interrupt causes and enables the interrupt.
21273518df57SJiawen Wu  * It is called only once, during NIC initialization.
21283518df57SJiawen Wu  *
21293518df57SJiawen Wu  * @param dev
21303518df57SJiawen Wu  *  Pointer to struct rte_eth_dev.
21313518df57SJiawen Wu  *
21323518df57SJiawen Wu  * @return
21333518df57SJiawen Wu  *  - On success, zero.
21343518df57SJiawen Wu  *  - On failure, a negative value.
21353518df57SJiawen Wu  */
21363518df57SJiawen Wu static int
21373518df57SJiawen Wu ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
21383518df57SJiawen Wu {
21393518df57SJiawen Wu 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
21403518df57SJiawen Wu 	u64 mask;
21413518df57SJiawen Wu 
21423518df57SJiawen Wu 	mask = NGBE_ICR_MASK;
21433518df57SJiawen Wu 	mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
21443518df57SJiawen Wu 	intr->mask |= mask;
21453518df57SJiawen Wu 
21463518df57SJiawen Wu 	return 0;
21473518df57SJiawen Wu }
21483518df57SJiawen Wu 
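/*
 * Interrupt vector layout assumed by the setup helpers above: bit
 * NGBE_MISC_VEC_ID of intr->mask covers the miscellaneous vector (link,
 * GPIO, mailbox and MACsec causes), while the bits from NGBE_RX_VEC_START
 * upwards cover the Rx queue vectors.
 */
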
21493518df57SJiawen Wu /**
21503518df57SJiawen Wu  * It clears the interrupt causes and enables the interrupt.
21513518df57SJiawen Wu  * It is called only once, during NIC initialization.
21523518df57SJiawen Wu  *
21533518df57SJiawen Wu  * @param dev
21543518df57SJiawen Wu  *  Pointer to struct rte_eth_dev.
21553518df57SJiawen Wu  *
21563518df57SJiawen Wu  * @return
21573518df57SJiawen Wu  *  - On success, zero.
21583518df57SJiawen Wu  *  - On failure, a negative value.
21593518df57SJiawen Wu  */
21603518df57SJiawen Wu static int
21613518df57SJiawen Wu ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
21623518df57SJiawen Wu {
21633518df57SJiawen Wu 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
21643518df57SJiawen Wu 
21653518df57SJiawen Wu 	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;
21663518df57SJiawen Wu 
21673518df57SJiawen Wu 	return 0;
21683518df57SJiawen Wu }
21693518df57SJiawen Wu 
2170b9246b8fSJiawen Wu /*
2171b9246b8fSJiawen Wu  * It reads the ICR and sets a flag for the link update.
2172b9246b8fSJiawen Wu  *
2173b9246b8fSJiawen Wu  * @param dev
2174b9246b8fSJiawen Wu  *  Pointer to struct rte_eth_dev.
2175b9246b8fSJiawen Wu  *
2176b9246b8fSJiawen Wu  * @return
2177b9246b8fSJiawen Wu  *  - On success, zero.
2178b9246b8fSJiawen Wu  *  - On failure, a negative value.
2179b9246b8fSJiawen Wu  */
2180b9246b8fSJiawen Wu static int
2181b9246b8fSJiawen Wu ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2182b9246b8fSJiawen Wu {
2183b9246b8fSJiawen Wu 	uint32_t eicr;
2184b9246b8fSJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2185b9246b8fSJiawen Wu 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2186b9246b8fSJiawen Wu 
218768f04c0aSJiawen Wu 	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_VEC0];
218868f04c0aSJiawen Wu 	if (!eicr) {
218968f04c0aSJiawen Wu 		/*
219068f04c0aSJiawen Wu 		 * shared interrupt alert!
219168f04c0aSJiawen Wu 		 * make sure interrupts are enabled because the read will
219268f04c0aSJiawen Wu 		 * have disabled interrupts.
219368f04c0aSJiawen Wu 		 */
219468f04c0aSJiawen Wu 		if (!hw->adapter_stopped)
219568f04c0aSJiawen Wu 			ngbe_enable_intr(dev);
219668f04c0aSJiawen Wu 		return 0;
219768f04c0aSJiawen Wu 	}
219868f04c0aSJiawen Wu 	((u32 *)hw->isb_mem)[NGBE_ISB_VEC0] = 0;
219968f04c0aSJiawen Wu 
2200b9246b8fSJiawen Wu 	/* read-on-clear nic registers here */
2201b9246b8fSJiawen Wu 	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
2202b9246b8fSJiawen Wu 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2203b9246b8fSJiawen Wu 
2204b9246b8fSJiawen Wu 	intr->flags = 0;
2205b9246b8fSJiawen Wu 
2206b9246b8fSJiawen Wu 	/* set flag for async link update */
2207b9246b8fSJiawen Wu 	if (eicr & NGBE_ICRMISC_PHY)
2208b9246b8fSJiawen Wu 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2209b9246b8fSJiawen Wu 
2210b9246b8fSJiawen Wu 	if (eicr & NGBE_ICRMISC_VFMBX)
2211b9246b8fSJiawen Wu 		intr->flags |= NGBE_FLAG_MAILBOX;
2212b9246b8fSJiawen Wu 
2213b9246b8fSJiawen Wu 	if (eicr & NGBE_ICRMISC_LNKSEC)
2214b9246b8fSJiawen Wu 		intr->flags |= NGBE_FLAG_MACSEC;
2215b9246b8fSJiawen Wu 
2216b9246b8fSJiawen Wu 	if (eicr & NGBE_ICRMISC_GPIO)
2217b9246b8fSJiawen Wu 		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
2218b9246b8fSJiawen Wu 
2219f3ff9f28SJiawen Wu 	if (eicr & NGBE_ICRMISC_HEAT)
2220f3ff9f28SJiawen Wu 		intr->flags |= NGBE_FLAG_OVERHEAT;
2221f3ff9f28SJiawen Wu 
22220360c23fSJiawen Wu 	((u32 *)hw->isb_mem)[NGBE_ISB_MISC] = 0;
22230360c23fSJiawen Wu 
2224b9246b8fSJiawen Wu 	return 0;
2225b9246b8fSJiawen Wu }
2226b9246b8fSJiawen Wu 
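/*
 * Note: interrupt causes are read from hw->isb_mem, an interrupt status
 * block kept in host memory rather than in device registers; the routine
 * above consumes an entry by writing 0 back to it and decodes the pending
 * causes into intr->flags for ngbe_dev_interrupt_action().
 */
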
2227b9246b8fSJiawen Wu /**
2228b9246b8fSJiawen Wu  * It gets and then prints the link status.
2229b9246b8fSJiawen Wu  *
2230b9246b8fSJiawen Wu  * @param dev
2231b9246b8fSJiawen Wu  *  Pointer to struct rte_eth_dev.
2232b9246b8fSJiawen Wu  *
2233b9246b8fSJiawen Wu  * @return
2234b9246b8fSJiawen Wu  *  - On success, zero.
2235b9246b8fSJiawen Wu  *  - On failure, a negative value.
2236b9246b8fSJiawen Wu  */
2237b9246b8fSJiawen Wu static void
2238b9246b8fSJiawen Wu ngbe_dev_link_status_print(struct rte_eth_dev *dev)
2239b9246b8fSJiawen Wu {
2240b9246b8fSJiawen Wu 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2241b9246b8fSJiawen Wu 	struct rte_eth_link link;
2242b9246b8fSJiawen Wu 
2243b9246b8fSJiawen Wu 	rte_eth_linkstatus_get(dev, &link);
2244b9246b8fSJiawen Wu 
2245295968d1SFerruh Yigit 	if (link.link_status == RTE_ETH_LINK_UP) {
2246b9246b8fSJiawen Wu 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2247b9246b8fSJiawen Wu 					(int)(dev->data->port_id),
2248b9246b8fSJiawen Wu 					(unsigned int)link.link_speed,
2249295968d1SFerruh Yigit 			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2250b9246b8fSJiawen Wu 					"full-duplex" : "half-duplex");
2251b9246b8fSJiawen Wu 	} else {
2252b9246b8fSJiawen Wu 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
2253b9246b8fSJiawen Wu 				(int)(dev->data->port_id));
2254b9246b8fSJiawen Wu 	}
2255b9246b8fSJiawen Wu 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2256b9246b8fSJiawen Wu 				pci_dev->addr.domain,
2257b9246b8fSJiawen Wu 				pci_dev->addr.bus,
2258b9246b8fSJiawen Wu 				pci_dev->addr.devid,
2259b9246b8fSJiawen Wu 				pci_dev->addr.function);
2260b9246b8fSJiawen Wu }
2261b9246b8fSJiawen Wu 
2262b9246b8fSJiawen Wu /*
2263b9246b8fSJiawen Wu  * It executes link_update after an interrupt has occurred.
2264b9246b8fSJiawen Wu  *
2265b9246b8fSJiawen Wu  * @param dev
2266b9246b8fSJiawen Wu  *  Pointer to struct rte_eth_dev.
2267b9246b8fSJiawen Wu  *
2268b9246b8fSJiawen Wu  * @return
2269b9246b8fSJiawen Wu  *  - On success, zero.
2270b9246b8fSJiawen Wu  *  - On failure, a negative value.
2271b9246b8fSJiawen Wu  */
2272b9246b8fSJiawen Wu static int
2273b9246b8fSJiawen Wu ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
2274b9246b8fSJiawen Wu {
2275b9246b8fSJiawen Wu 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
2276b9246b8fSJiawen Wu 
2277b9246b8fSJiawen Wu 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2278b9246b8fSJiawen Wu 
2279e2a289a7SJiawen Wu 	if (intr->flags & NGBE_FLAG_MAILBOX) {
2280e2a289a7SJiawen Wu 		ngbe_pf_mbx_process(dev);
2281e2a289a7SJiawen Wu 		intr->flags &= ~NGBE_FLAG_MAILBOX;
2282e2a289a7SJiawen Wu 	}
2283e2a289a7SJiawen Wu 
2284b9246b8fSJiawen Wu 	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
2285b9246b8fSJiawen Wu 		struct rte_eth_link link;
2286b9246b8fSJiawen Wu 
2287b9246b8fSJiawen Wu 		/* get the link status before the update, to compare with it afterwards */
2288b9246b8fSJiawen Wu 		rte_eth_linkstatus_get(dev, &link);
2289b9246b8fSJiawen Wu 
2290b9246b8fSJiawen Wu 		ngbe_dev_link_update(dev, 0);
22910360c23fSJiawen Wu 		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
2292b9246b8fSJiawen Wu 		ngbe_dev_link_status_print(dev);
22930360c23fSJiawen Wu 		if (dev->data->dev_link.link_speed != link.link_speed)
22940360c23fSJiawen Wu 			rte_eth_dev_callback_process(dev,
22950360c23fSJiawen Wu 				RTE_ETH_EVENT_INTR_LSC, NULL);
2296b9246b8fSJiawen Wu 	}
2297b9246b8fSJiawen Wu 
2298f3ff9f28SJiawen Wu 	if (intr->flags & NGBE_FLAG_OVERHEAT) {
2299f3ff9f28SJiawen Wu 		ngbe_dev_overheat(dev);
2300f3ff9f28SJiawen Wu 		intr->flags &= ~NGBE_FLAG_OVERHEAT;
2301f3ff9f28SJiawen Wu 	}
2302f3ff9f28SJiawen Wu 
2303b9246b8fSJiawen Wu 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
2304b9246b8fSJiawen Wu 	ngbe_enable_intr(dev);
2305b9246b8fSJiawen Wu 
2306b9246b8fSJiawen Wu 	return 0;
2307b9246b8fSJiawen Wu }
2308b9246b8fSJiawen Wu 
2309b9246b8fSJiawen Wu /**
2310b9246b8fSJiawen Wu  * Interrupt handler triggered by the NIC for handling
2311b9246b8fSJiawen Wu  * a specific interrupt.
2312b9246b8fSJiawen Wu  *
2313b9246b8fSJiawen Wu  * @param param
2314b9246b8fSJiawen Wu  *  The address of parameter (struct rte_eth_dev *) registered before.
2315b9246b8fSJiawen Wu  */
2316b9246b8fSJiawen Wu static void
2317b9246b8fSJiawen Wu ngbe_dev_interrupt_handler(void *param)
2318b9246b8fSJiawen Wu {
2319b9246b8fSJiawen Wu 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2320b9246b8fSJiawen Wu 
2321b9246b8fSJiawen Wu 	ngbe_dev_interrupt_get_status(dev);
2322b9246b8fSJiawen Wu 	ngbe_dev_interrupt_action(dev);
2323b9246b8fSJiawen Wu }
2324b9246b8fSJiawen Wu 
2325f40e9f0eSJiawen Wu static int
23264db3db29SJiawen Wu ngbe_dev_led_on(struct rte_eth_dev *dev)
23274db3db29SJiawen Wu {
23284db3db29SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
23294db3db29SJiawen Wu 	return hw->mac.led_on(hw, 0) == 0 ? 0 : -ENOTSUP;
23304db3db29SJiawen Wu }
23314db3db29SJiawen Wu 
23324db3db29SJiawen Wu static int
23334db3db29SJiawen Wu ngbe_dev_led_off(struct rte_eth_dev *dev)
23344db3db29SJiawen Wu {
23354db3db29SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
23364db3db29SJiawen Wu 	return hw->mac.led_off(hw, 0) == 0 ? 0 : -ENOTSUP;
23374db3db29SJiawen Wu }
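
/*
 * Illustrative sketch (not part of the upstream driver): the two callbacks
 * above back the generic rte_eth_led_on()/rte_eth_led_off() API; a hedged
 * example of driving them from an application follows. The helper name is
 * hypothetical.
 */
static __rte_unused int
example_toggle_led(uint16_t port_id, int on)
{
	/* Both calls return 0 on success or -ENOTSUP if LED control is absent */
	return on ? rte_eth_led_on(port_id) : rte_eth_led_off(port_id);
}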
23384db3db29SJiawen Wu 
23394db3db29SJiawen Wu static int
2340f40e9f0eSJiawen Wu ngbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2341f40e9f0eSJiawen Wu {
2342f40e9f0eSJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2343f40e9f0eSJiawen Wu 	uint32_t mflcn_reg;
2344f40e9f0eSJiawen Wu 	uint32_t fccfg_reg;
2345f40e9f0eSJiawen Wu 	int rx_pause;
2346f40e9f0eSJiawen Wu 	int tx_pause;
2347f40e9f0eSJiawen Wu 
2348f40e9f0eSJiawen Wu 	fc_conf->pause_time = hw->fc.pause_time;
2349f40e9f0eSJiawen Wu 	fc_conf->high_water = hw->fc.high_water;
2350f40e9f0eSJiawen Wu 	fc_conf->low_water = hw->fc.low_water;
2351f40e9f0eSJiawen Wu 	fc_conf->send_xon = hw->fc.send_xon;
2352f40e9f0eSJiawen Wu 	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2353f40e9f0eSJiawen Wu 
2354f40e9f0eSJiawen Wu 	/*
2355f40e9f0eSJiawen Wu 	 * Return rx_pause status according to actual setting of
2356f40e9f0eSJiawen Wu 	 * RXFCCFG register.
2357f40e9f0eSJiawen Wu 	 */
2358f40e9f0eSJiawen Wu 	mflcn_reg = rd32(hw, NGBE_RXFCCFG);
2359f40e9f0eSJiawen Wu 	if (mflcn_reg & NGBE_RXFCCFG_FC)
2360f40e9f0eSJiawen Wu 		rx_pause = 1;
2361f40e9f0eSJiawen Wu 	else
2362f40e9f0eSJiawen Wu 		rx_pause = 0;
2363f40e9f0eSJiawen Wu 
2364f40e9f0eSJiawen Wu 	/*
2365f40e9f0eSJiawen Wu 	 * Return tx_pause status according to actual setting of
2366f40e9f0eSJiawen Wu 	 * TXFCCFG register.
2367f40e9f0eSJiawen Wu 	 */
2368f40e9f0eSJiawen Wu 	fccfg_reg = rd32(hw, NGBE_TXFCCFG);
2369f40e9f0eSJiawen Wu 	if (fccfg_reg & NGBE_TXFCCFG_FC)
2370f40e9f0eSJiawen Wu 		tx_pause = 1;
2371f40e9f0eSJiawen Wu 	else
2372f40e9f0eSJiawen Wu 		tx_pause = 0;
2373f40e9f0eSJiawen Wu 
2374f40e9f0eSJiawen Wu 	if (rx_pause && tx_pause)
2375f40e9f0eSJiawen Wu 		fc_conf->mode = RTE_ETH_FC_FULL;
2376f40e9f0eSJiawen Wu 	else if (rx_pause)
2377f40e9f0eSJiawen Wu 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2378f40e9f0eSJiawen Wu 	else if (tx_pause)
2379f40e9f0eSJiawen Wu 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2380f40e9f0eSJiawen Wu 	else
2381f40e9f0eSJiawen Wu 		fc_conf->mode = RTE_ETH_FC_NONE;
2382f40e9f0eSJiawen Wu 
2383f40e9f0eSJiawen Wu 	return 0;
2384f40e9f0eSJiawen Wu }
2385f40e9f0eSJiawen Wu 
2386f40e9f0eSJiawen Wu static int
2387f40e9f0eSJiawen Wu ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2388f40e9f0eSJiawen Wu {
2389f40e9f0eSJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2390f40e9f0eSJiawen Wu 	int err;
2391f40e9f0eSJiawen Wu 	uint32_t rx_buf_size;
2392f40e9f0eSJiawen Wu 	uint32_t max_high_water;
2393f40e9f0eSJiawen Wu 	enum ngbe_fc_mode rte_fcmode_2_ngbe_fcmode[] = {
2394f40e9f0eSJiawen Wu 		ngbe_fc_none,
2395f40e9f0eSJiawen Wu 		ngbe_fc_rx_pause,
2396f40e9f0eSJiawen Wu 		ngbe_fc_tx_pause,
2397f40e9f0eSJiawen Wu 		ngbe_fc_full
2398f40e9f0eSJiawen Wu 	};
2399f40e9f0eSJiawen Wu 
2400f40e9f0eSJiawen Wu 	PMD_INIT_FUNC_TRACE();
2401f40e9f0eSJiawen Wu 
2402f40e9f0eSJiawen Wu 	rx_buf_size = rd32(hw, NGBE_PBRXSIZE);
2403f40e9f0eSJiawen Wu 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2404f40e9f0eSJiawen Wu 
2405f40e9f0eSJiawen Wu 	/*
2406f40e9f0eSJiawen Wu 	 * Reserve at least one Ethernet frame for the watermark;
2407f40e9f0eSJiawen Wu 	 * high_water/low_water are expressed in kilobytes for ngbe.
2408f40e9f0eSJiawen Wu 	 */
2409f40e9f0eSJiawen Wu 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
2410f40e9f0eSJiawen Wu 	if (fc_conf->high_water > max_high_water ||
2411f40e9f0eSJiawen Wu 	    fc_conf->high_water < fc_conf->low_water) {
2412f40e9f0eSJiawen Wu 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2413f40e9f0eSJiawen Wu 		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
2414f40e9f0eSJiawen Wu 		return -EINVAL;
2415f40e9f0eSJiawen Wu 	}
2416f40e9f0eSJiawen Wu 
2417f40e9f0eSJiawen Wu 	hw->fc.requested_mode = rte_fcmode_2_ngbe_fcmode[fc_conf->mode];
2418f40e9f0eSJiawen Wu 	hw->fc.pause_time     = fc_conf->pause_time;
2419f40e9f0eSJiawen Wu 	hw->fc.high_water     = fc_conf->high_water;
2420f40e9f0eSJiawen Wu 	hw->fc.low_water      = fc_conf->low_water;
2421f40e9f0eSJiawen Wu 	hw->fc.send_xon       = fc_conf->send_xon;
2422f40e9f0eSJiawen Wu 	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
2423f40e9f0eSJiawen Wu 
2424f40e9f0eSJiawen Wu 	err = hw->mac.fc_enable(hw);
2425f40e9f0eSJiawen Wu 
2426f40e9f0eSJiawen Wu 	/* Not negotiated is not an error case */
2427f40e9f0eSJiawen Wu 	if (err == 0 || err == NGBE_ERR_FC_NOT_NEGOTIATED) {
2428f40e9f0eSJiawen Wu 		wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_CTL_MASK,
2429f40e9f0eSJiawen Wu 		      (fc_conf->mac_ctrl_frame_fwd
2430f40e9f0eSJiawen Wu 		       ? NGBE_MACRXFLT_CTL_NOPS : NGBE_MACRXFLT_CTL_DROP));
2431f40e9f0eSJiawen Wu 		ngbe_flush(hw);
2432f40e9f0eSJiawen Wu 
2433f40e9f0eSJiawen Wu 		return 0;
2434f40e9f0eSJiawen Wu 	}
2435f40e9f0eSJiawen Wu 
2436f40e9f0eSJiawen Wu 	PMD_INIT_LOG(ERR, "ngbe_fc_enable = 0x%x", err);
2437f40e9f0eSJiawen Wu 	return -EIO;
2438f40e9f0eSJiawen Wu }
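
/*
 * Illustrative sketch (not part of the upstream driver): enabling full
 * flow control through the generic API that ends up in ngbe_flow_ctrl_set()
 * above. The values are examples only and the helper name is hypothetical.
 */
static __rte_unused int
example_enable_pause(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	/* Start from the current settings so unrelated fields are preserved */
	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	fc_conf.mode = RTE_ETH_FC_FULL;	/* honour and send PAUSE frames */
	fc_conf.pause_time = 0x680;	/* example pause quanta */
	fc_conf.send_xon = 1;

	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}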
2439f40e9f0eSJiawen Wu 
2440d19fa5a1SJiawen Wu /* Additional bittime to account for NGBE framing */
2441d19fa5a1SJiawen Wu #define NGBE_ETH_FRAMING 20
2442d19fa5a1SJiawen Wu 
2443d19fa5a1SJiawen Wu /*
2444d19fa5a1SJiawen Wu  * ngbe_fc_hpbthresh_set - calculate high water mark for flow control
2445d19fa5a1SJiawen Wu  *
2446d19fa5a1SJiawen Wu  * @dev: pointer to rte_eth_dev structure
2448d19fa5a1SJiawen Wu  */
2449d19fa5a1SJiawen Wu static s32
2450d19fa5a1SJiawen Wu ngbe_fc_hpbthresh_set(struct rte_eth_dev *dev)
2451d19fa5a1SJiawen Wu {
2452d19fa5a1SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2453d19fa5a1SJiawen Wu 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2454d19fa5a1SJiawen Wu 	u32 max_frame_size, tc, dv_id, rx_pb;
2455d19fa5a1SJiawen Wu 	s32 kb, marker;
2456d19fa5a1SJiawen Wu 
2457d19fa5a1SJiawen Wu 	/* Calculate max LAN frame size */
2458d19fa5a1SJiawen Wu 	max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK);
2459d19fa5a1SJiawen Wu 	tc = max_frame_size + NGBE_ETH_FRAMING;
2460d19fa5a1SJiawen Wu 
2461d19fa5a1SJiawen Wu 	/* Calculate delay value for device */
2462d19fa5a1SJiawen Wu 	dv_id = NGBE_DV(tc, tc);
2463d19fa5a1SJiawen Wu 
2464d19fa5a1SJiawen Wu 	/* Loopback switch introduces additional latency */
2465d19fa5a1SJiawen Wu 	if (pci_dev->max_vfs)
2466d19fa5a1SJiawen Wu 		dv_id += NGBE_B2BT(tc);
2467d19fa5a1SJiawen Wu 
2468d19fa5a1SJiawen Wu 	/* Delay value is calculated in bit times; convert to KB */
2469d19fa5a1SJiawen Wu 	kb = NGBE_BT2KB(dv_id);
2470d19fa5a1SJiawen Wu 	rx_pb = rd32(hw, NGBE_PBRXSIZE) >> 10;
2471d19fa5a1SJiawen Wu 
2472d19fa5a1SJiawen Wu 	marker = rx_pb - kb;
2473d19fa5a1SJiawen Wu 
2474d19fa5a1SJiawen Wu 	/* It is possible that the packet buffer is not large enough
2475d19fa5a1SJiawen Wu 	 * to provide the required headroom. In this case, warn the
2476d19fa5a1SJiawen Wu 	 * user and do the best we can.
2477d19fa5a1SJiawen Wu 	 */
2478d19fa5a1SJiawen Wu 	if (marker < 0) {
2479d19fa5a1SJiawen Wu 		PMD_DRV_LOG(WARNING, "Packet buffer cannot provide enough headroom to support flow control.");
2480d19fa5a1SJiawen Wu 		marker = tc + 1;
2481d19fa5a1SJiawen Wu 	}
2482d19fa5a1SJiawen Wu 
2483d19fa5a1SJiawen Wu 	return marker;
2484d19fa5a1SJiawen Wu }
2485d19fa5a1SJiawen Wu 
2486d19fa5a1SJiawen Wu /*
2487d19fa5a1SJiawen Wu  * ngbe_fc_lpbthresh_set - calculate low water mark for flow control
2488d19fa5a1SJiawen Wu  *
2489d19fa5a1SJiawen Wu  * @dev: pointer to rte_eth_dev structure
2490d19fa5a1SJiawen Wu  */
2491d19fa5a1SJiawen Wu static s32
2492d19fa5a1SJiawen Wu ngbe_fc_lpbthresh_set(struct rte_eth_dev *dev)
2493d19fa5a1SJiawen Wu {
2494d19fa5a1SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2495d19fa5a1SJiawen Wu 	u32 max_frame_size, tc, dv_id;
2496d19fa5a1SJiawen Wu 	s32 kb;
2497d19fa5a1SJiawen Wu 
2498d19fa5a1SJiawen Wu 	/* Calculate max LAN frame size */
2499d19fa5a1SJiawen Wu 	max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK);
2500d19fa5a1SJiawen Wu 	tc = max_frame_size + NGBE_ETH_FRAMING;
2501d19fa5a1SJiawen Wu 
2502d19fa5a1SJiawen Wu 	/* Calculate delay value for device */
2503d19fa5a1SJiawen Wu 	dv_id = NGBE_LOW_DV(tc);
2504d19fa5a1SJiawen Wu 
2505d19fa5a1SJiawen Wu 	/* Delay value is calculated in bit times; convert to KB */
2506d19fa5a1SJiawen Wu 	kb = NGBE_BT2KB(dv_id);
2507d19fa5a1SJiawen Wu 
2508d19fa5a1SJiawen Wu 	return kb;
2509d19fa5a1SJiawen Wu }
2510d19fa5a1SJiawen Wu 
2511d19fa5a1SJiawen Wu /*
2512d19fa5a1SJiawen Wu  * ngbe_pbthresh_set - calculate and set up the high and low water marks
2513d19fa5a1SJiawen Wu  */
2514d19fa5a1SJiawen Wu static void
2515d19fa5a1SJiawen Wu ngbe_pbthresh_set(struct rte_eth_dev *dev)
2516d19fa5a1SJiawen Wu {
2517d19fa5a1SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2518d19fa5a1SJiawen Wu 
2519d19fa5a1SJiawen Wu 	hw->fc.high_water = ngbe_fc_hpbthresh_set(dev);
2520d19fa5a1SJiawen Wu 	hw->fc.low_water = ngbe_fc_lpbthresh_set(dev);
2521d19fa5a1SJiawen Wu 
2522d19fa5a1SJiawen Wu 	/* Low water marks must not be larger than high water marks */
2523d19fa5a1SJiawen Wu 	if (hw->fc.low_water > hw->fc.high_water)
2524d19fa5a1SJiawen Wu 		hw->fc.low_water = 0;
2525d19fa5a1SJiawen Wu }
2526d19fa5a1SJiawen Wu 
25270779d7f6SJiawen Wu int
25280779d7f6SJiawen Wu ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
25290779d7f6SJiawen Wu 			  struct rte_eth_rss_reta_entry64 *reta_conf,
25300779d7f6SJiawen Wu 			  uint16_t reta_size)
25310779d7f6SJiawen Wu {
25320779d7f6SJiawen Wu 	uint8_t i, j, mask;
25330779d7f6SJiawen Wu 	uint32_t reta;
25340779d7f6SJiawen Wu 	uint16_t idx, shift;
25350779d7f6SJiawen Wu 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
25360779d7f6SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
25370779d7f6SJiawen Wu 
25380779d7f6SJiawen Wu 	PMD_INIT_FUNC_TRACE();
25390779d7f6SJiawen Wu 
25400779d7f6SJiawen Wu 	if (!hw->is_pf) {
25410779d7f6SJiawen Wu 		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
25420779d7f6SJiawen Wu 			"NIC.");
25430779d7f6SJiawen Wu 		return -ENOTSUP;
25440779d7f6SJiawen Wu 	}
25450779d7f6SJiawen Wu 
25460779d7f6SJiawen Wu 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
25470779d7f6SJiawen Wu 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
25480779d7f6SJiawen Wu 			"(%d) doesn't match the number hardware can support "
25490779d7f6SJiawen Wu 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
25500779d7f6SJiawen Wu 		return -EINVAL;
25510779d7f6SJiawen Wu 	}
25520779d7f6SJiawen Wu 
25530779d7f6SJiawen Wu 	for (i = 0; i < reta_size; i += 4) {
25540779d7f6SJiawen Wu 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
25550779d7f6SJiawen Wu 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
25560779d7f6SJiawen Wu 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
25570779d7f6SJiawen Wu 		if (!mask)
25580779d7f6SJiawen Wu 			continue;
25590779d7f6SJiawen Wu 
25600779d7f6SJiawen Wu 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
25610779d7f6SJiawen Wu 		for (j = 0; j < 4; j++) {
25620779d7f6SJiawen Wu 			if (RS8(mask, j, 0x1)) {
25630779d7f6SJiawen Wu 				reta  &= ~(MS32(8 * j, 0xFF));
25640779d7f6SJiawen Wu 				reta |= LS32(reta_conf[idx].reta[shift + j],
25650779d7f6SJiawen Wu 						8 * j, 0xFF);
25660779d7f6SJiawen Wu 			}
25670779d7f6SJiawen Wu 		}
25680779d7f6SJiawen Wu 		wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
25690779d7f6SJiawen Wu 	}
25700779d7f6SJiawen Wu 	adapter->rss_reta_updated = 1;
25710779d7f6SJiawen Wu 
25720779d7f6SJiawen Wu 	return 0;
25730779d7f6SJiawen Wu }
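
/*
 * Illustrative sketch (not part of the upstream driver): spreading the
 * 128-entry redirection table round-robin over nb_queues Rx queues with the
 * generic API served by ngbe_dev_rss_reta_update() above. The helper name is
 * hypothetical.
 */
static __rte_unused int
example_spread_reta(uint16_t port_id, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
						  RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i, idx, shift;

	if (nb_queues == 0)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		reta_conf[idx].mask |= 1ULL << shift;
		reta_conf[idx].reta[shift] = i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
					   RTE_ETH_RSS_RETA_SIZE_128);
}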
25740779d7f6SJiawen Wu 
25750779d7f6SJiawen Wu int
25760779d7f6SJiawen Wu ngbe_dev_rss_reta_query(struct rte_eth_dev *dev,
25770779d7f6SJiawen Wu 			 struct rte_eth_rss_reta_entry64 *reta_conf,
25780779d7f6SJiawen Wu 			 uint16_t reta_size)
25790779d7f6SJiawen Wu {
25800779d7f6SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
25810779d7f6SJiawen Wu 	uint8_t i, j, mask;
25820779d7f6SJiawen Wu 	uint32_t reta;
25830779d7f6SJiawen Wu 	uint16_t idx, shift;
25840779d7f6SJiawen Wu 
25850779d7f6SJiawen Wu 	PMD_INIT_FUNC_TRACE();
25860779d7f6SJiawen Wu 
25870779d7f6SJiawen Wu 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
25880779d7f6SJiawen Wu 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
25890779d7f6SJiawen Wu 			"(%d) doesn't match the number hardware can support "
25900779d7f6SJiawen Wu 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
25910779d7f6SJiawen Wu 		return -EINVAL;
25920779d7f6SJiawen Wu 	}
25930779d7f6SJiawen Wu 
25940779d7f6SJiawen Wu 	for (i = 0; i < reta_size; i += 4) {
25950779d7f6SJiawen Wu 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
25960779d7f6SJiawen Wu 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
25970779d7f6SJiawen Wu 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
25980779d7f6SJiawen Wu 		if (!mask)
25990779d7f6SJiawen Wu 			continue;
26000779d7f6SJiawen Wu 
26010779d7f6SJiawen Wu 		reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
26020779d7f6SJiawen Wu 		for (j = 0; j < 4; j++) {
26030779d7f6SJiawen Wu 			if (RS8(mask, j, 0x1))
26040779d7f6SJiawen Wu 				reta_conf[idx].reta[shift + j] =
26050779d7f6SJiawen Wu 					(uint16_t)RS32(reta, 8 * j, 0xFF);
26060779d7f6SJiawen Wu 		}
26070779d7f6SJiawen Wu 	}
26080779d7f6SJiawen Wu 
26090779d7f6SJiawen Wu 	return 0;
26100779d7f6SJiawen Wu }
26110779d7f6SJiawen Wu 
261207baabb6SJiawen Wu static int
2613dee93977SJiawen Wu ngbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
2614dee93977SJiawen Wu 				uint32_t index, uint32_t pool)
2615dee93977SJiawen Wu {
2616dee93977SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2617dee93977SJiawen Wu 	uint32_t enable_addr = 1;
2618dee93977SJiawen Wu 
2619dee93977SJiawen Wu 	return ngbe_set_rar(hw, index, mac_addr->addr_bytes,
2620dee93977SJiawen Wu 			     pool, enable_addr);
2621dee93977SJiawen Wu }
2622dee93977SJiawen Wu 
2623dee93977SJiawen Wu static void
2624dee93977SJiawen Wu ngbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2625dee93977SJiawen Wu {
2626dee93977SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2627dee93977SJiawen Wu 
2628dee93977SJiawen Wu 	ngbe_clear_rar(hw, index);
2629dee93977SJiawen Wu }
2630dee93977SJiawen Wu 
2631dee93977SJiawen Wu static int
2632dee93977SJiawen Wu ngbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
2633dee93977SJiawen Wu {
2634dee93977SJiawen Wu 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2635dee93977SJiawen Wu 
2636dee93977SJiawen Wu 	ngbe_remove_rar(dev, 0);
2637dee93977SJiawen Wu 	ngbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
2638dee93977SJiawen Wu 
2639dee93977SJiawen Wu 	return 0;
2640dee93977SJiawen Wu }
2641dee93977SJiawen Wu 
2642dee93977SJiawen Wu static int
264307baabb6SJiawen Wu ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
264407baabb6SJiawen Wu {
264507baabb6SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2646d5774f0cSJiawen Wu 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
264707baabb6SJiawen Wu 	struct rte_eth_dev_data *dev_data = dev->data;
264807baabb6SJiawen Wu 
264907baabb6SJiawen Wu 	/* If device is started, refuse mtu that requires the support of
265007baabb6SJiawen Wu 	 * scattered packets when this feature has not been enabled before.
265107baabb6SJiawen Wu 	 */
265207baabb6SJiawen Wu 	if (dev_data->dev_started && !dev_data->scattered_rx &&
265325cf2630SFerruh Yigit 	    (frame_size + 2 * RTE_VLAN_HLEN >
265407baabb6SJiawen Wu 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
265507baabb6SJiawen Wu 		PMD_INIT_LOG(ERR, "Stop port first.");
265607baabb6SJiawen Wu 		return -EINVAL;
265707baabb6SJiawen Wu 	}
265807baabb6SJiawen Wu 
265907baabb6SJiawen Wu 	wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
266007baabb6SJiawen Wu 		NGBE_FRMSZ_MAX(frame_size));
266107baabb6SJiawen Wu 
266207baabb6SJiawen Wu 	return 0;
266307baabb6SJiawen Wu }
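
/*
 * Illustrative sketch (not part of the upstream driver): the generic MTU call
 * that lands in ngbe_dev_mtu_set() above. Jumbo sizes only succeed when the
 * Rx buffers (or scattered Rx) can hold MTU plus Ethernet/CRC overhead; the
 * helper name and the 9000-byte value are examples.
 */
static __rte_unused int
example_set_jumbo_mtu(uint16_t port_id)
{
	return rte_eth_dev_set_mtu(port_id, 9000);
}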
266407baabb6SJiawen Wu 
2665dee93977SJiawen Wu static uint32_t
2666dee93977SJiawen Wu ngbe_uta_vector(struct ngbe_hw *hw, struct rte_ether_addr *uc_addr)
2667dee93977SJiawen Wu {
2668dee93977SJiawen Wu 	uint32_t vector = 0;
2669dee93977SJiawen Wu 
2670dee93977SJiawen Wu 	switch (hw->mac.mc_filter_type) {
2671dee93977SJiawen Wu 	case 0:   /* use bits [47:36] of the address */
2672dee93977SJiawen Wu 		vector = ((uc_addr->addr_bytes[4] >> 4) |
2673dee93977SJiawen Wu 			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
2674dee93977SJiawen Wu 		break;
2675dee93977SJiawen Wu 	case 1:   /* use bits [46:35] of the address */
2676dee93977SJiawen Wu 		vector = ((uc_addr->addr_bytes[4] >> 3) |
2677dee93977SJiawen Wu 			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
2678dee93977SJiawen Wu 		break;
2679dee93977SJiawen Wu 	case 2:   /* use bits [45:34] of the address */
2680dee93977SJiawen Wu 		vector = ((uc_addr->addr_bytes[4] >> 2) |
2681dee93977SJiawen Wu 			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
2682dee93977SJiawen Wu 		break;
2683dee93977SJiawen Wu 	case 3:   /* use bits [43:32] of the address */
2684dee93977SJiawen Wu 		vector = ((uc_addr->addr_bytes[4]) |
2685dee93977SJiawen Wu 			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
2686dee93977SJiawen Wu 		break;
2687dee93977SJiawen Wu 	default:  /* Invalid mc_filter_type */
2688dee93977SJiawen Wu 		break;
2689dee93977SJiawen Wu 	}
2690dee93977SJiawen Wu 
2691dee93977SJiawen Wu 	/* vector can only be 12 bits wide or the boundary will be exceeded */
2692dee93977SJiawen Wu 	vector &= 0xFFF;
2693dee93977SJiawen Wu 	return vector;
2694dee93977SJiawen Wu }
2695dee93977SJiawen Wu 
2696dee93977SJiawen Wu static int
2697dee93977SJiawen Wu ngbe_uc_hash_table_set(struct rte_eth_dev *dev,
2698dee93977SJiawen Wu 			struct rte_ether_addr *mac_addr, uint8_t on)
2699dee93977SJiawen Wu {
2700dee93977SJiawen Wu 	uint32_t vector;
2701dee93977SJiawen Wu 	uint32_t uta_idx;
2702dee93977SJiawen Wu 	uint32_t reg_val;
2703dee93977SJiawen Wu 	uint32_t uta_mask;
2704dee93977SJiawen Wu 	uint32_t psrctl;
2705dee93977SJiawen Wu 
2706dee93977SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2707dee93977SJiawen Wu 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2708dee93977SJiawen Wu 
2709dee93977SJiawen Wu 	vector = ngbe_uta_vector(hw, mac_addr);
2710dee93977SJiawen Wu 	uta_idx = (vector >> 5) & 0x7F;
2711dee93977SJiawen Wu 	uta_mask = 0x1UL << (vector & 0x1F);
2712dee93977SJiawen Wu 
2713dee93977SJiawen Wu 	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
2714dee93977SJiawen Wu 		return 0;
2715dee93977SJiawen Wu 
2716dee93977SJiawen Wu 	reg_val = rd32(hw, NGBE_UCADDRTBL(uta_idx));
2717dee93977SJiawen Wu 	if (on) {
2718dee93977SJiawen Wu 		uta_info->uta_in_use++;
2719dee93977SJiawen Wu 		reg_val |= uta_mask;
2720dee93977SJiawen Wu 		uta_info->uta_shadow[uta_idx] |= uta_mask;
2721dee93977SJiawen Wu 	} else {
2722dee93977SJiawen Wu 		uta_info->uta_in_use--;
2723dee93977SJiawen Wu 		reg_val &= ~uta_mask;
2724dee93977SJiawen Wu 		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
2725dee93977SJiawen Wu 	}
2726dee93977SJiawen Wu 
2727dee93977SJiawen Wu 	wr32(hw, NGBE_UCADDRTBL(uta_idx), reg_val);
2728dee93977SJiawen Wu 
2729dee93977SJiawen Wu 	psrctl = rd32(hw, NGBE_PSRCTL);
2730dee93977SJiawen Wu 	if (uta_info->uta_in_use > 0)
2731dee93977SJiawen Wu 		psrctl |= NGBE_PSRCTL_UCHFENA;
2732dee93977SJiawen Wu 	else
2733dee93977SJiawen Wu 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2734dee93977SJiawen Wu 
2735dee93977SJiawen Wu 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2736dee93977SJiawen Wu 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2737dee93977SJiawen Wu 	wr32(hw, NGBE_PSRCTL, psrctl);
2738dee93977SJiawen Wu 
2739dee93977SJiawen Wu 	return 0;
2740dee93977SJiawen Wu }
2741dee93977SJiawen Wu 
2742dee93977SJiawen Wu static int
2743dee93977SJiawen Wu ngbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
2744dee93977SJiawen Wu {
2745dee93977SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2746dee93977SJiawen Wu 	struct ngbe_uta_info *uta_info = NGBE_DEV_UTA_INFO(dev);
2747dee93977SJiawen Wu 	uint32_t psrctl;
2748dee93977SJiawen Wu 	int i;
2749dee93977SJiawen Wu 
2750dee93977SJiawen Wu 	if (on) {
2751dee93977SJiawen Wu 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2752dee93977SJiawen Wu 			uta_info->uta_shadow[i] = ~0;
2753dee93977SJiawen Wu 			wr32(hw, NGBE_UCADDRTBL(i), ~0);
2754dee93977SJiawen Wu 		}
2755dee93977SJiawen Wu 	} else {
2756dee93977SJiawen Wu 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
2757dee93977SJiawen Wu 			uta_info->uta_shadow[i] = 0;
2758dee93977SJiawen Wu 			wr32(hw, NGBE_UCADDRTBL(i), 0);
2759dee93977SJiawen Wu 		}
2760dee93977SJiawen Wu 	}
2761dee93977SJiawen Wu 
2762dee93977SJiawen Wu 	psrctl = rd32(hw, NGBE_PSRCTL);
2763dee93977SJiawen Wu 	if (on)
2764dee93977SJiawen Wu 		psrctl |= NGBE_PSRCTL_UCHFENA;
2765dee93977SJiawen Wu 	else
2766dee93977SJiawen Wu 		psrctl &= ~NGBE_PSRCTL_UCHFENA;
2767dee93977SJiawen Wu 
2768dee93977SJiawen Wu 	psrctl &= ~NGBE_PSRCTL_ADHF12_MASK;
2769dee93977SJiawen Wu 	psrctl |= NGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
2770dee93977SJiawen Wu 	wr32(hw, NGBE_PSRCTL, psrctl);
2771dee93977SJiawen Wu 
2772dee93977SJiawen Wu 	return 0;
2773dee93977SJiawen Wu }
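
/*
 * Illustrative sketch (not part of the upstream driver): using the generic
 * unicast hash API backed by the two callbacks above to accept one extra
 * unicast address without consuming a RAR entry. The helper name and the
 * locally administered example address are hypothetical.
 */
static __rte_unused int
example_add_uc_hash(uint16_t port_id)
{
	struct rte_ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
	};

	/* on = 1 sets the hash bit, on = 0 clears it again */
	return rte_eth_dev_uc_hash_table_set(port_id, &addr, 1);
}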
2774dee93977SJiawen Wu 
27755547efc7SJiawen Wu static int
27765547efc7SJiawen Wu ngbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
27775547efc7SJiawen Wu {
27785547efc7SJiawen Wu 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
27795547efc7SJiawen Wu 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
27805547efc7SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
27815547efc7SJiawen Wu 	uint32_t mask;
27825547efc7SJiawen Wu 
27835547efc7SJiawen Wu 	mask = rd32(hw, NGBE_IMC(0));
27845547efc7SJiawen Wu 	mask |= (1 << queue_id);
27855547efc7SJiawen Wu 	wr32(hw, NGBE_IMC(0), mask);
27865547efc7SJiawen Wu 	rte_intr_enable(intr_handle);
27875547efc7SJiawen Wu 
27885547efc7SJiawen Wu 	return 0;
27895547efc7SJiawen Wu }
27905547efc7SJiawen Wu 
27915547efc7SJiawen Wu static int
27925547efc7SJiawen Wu ngbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
27935547efc7SJiawen Wu {
27945547efc7SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
27955547efc7SJiawen Wu 	uint32_t mask;
27965547efc7SJiawen Wu 
27975547efc7SJiawen Wu 	mask = rd32(hw, NGBE_IMS(0));
27985547efc7SJiawen Wu 	mask |= (1 << queue_id);
27995547efc7SJiawen Wu 	wr32(hw, NGBE_IMS(0), mask);
28005547efc7SJiawen Wu 
28015547efc7SJiawen Wu 	return 0;
28025547efc7SJiawen Wu }
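
/*
 * Illustrative sketch (not part of the upstream driver): an interrupt-driven
 * polling loop arms the per-queue interrupt through the callbacks above
 * before sleeping on an event fd, then disarms it when traffic resumes.
 * Only the ethdev calls are shown; the helper name is hypothetical.
 */
static __rte_unused int
example_arm_rxq_intr(uint16_t port_id, uint16_t queue_id)
{
	int ret;

	/* Enable the interrupt when the queue runs dry ... */
	ret = rte_eth_dev_rx_intr_enable(port_id, queue_id);
	if (ret != 0)
		return ret;
	/* ... wait on the event fd registered via rte_eth_dev_rx_intr_ctl_q()
	 * here, then go back to busy polling ...
	 */
	return rte_eth_dev_rx_intr_disable(port_id, queue_id);
}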
28035547efc7SJiawen Wu 
28043518df57SJiawen Wu /**
28053518df57SJiawen Wu  * Set the IVAR registers, mapping interrupt causes to vectors
28063518df57SJiawen Wu  * @param hw
28073518df57SJiawen Wu  *  pointer to ngbe_hw struct
28083518df57SJiawen Wu  * @param direction
28093518df57SJiawen Wu  *  0 for Rx, 1 for Tx, -1 for other causes
28103518df57SJiawen Wu  * @param queue
28113518df57SJiawen Wu  *  queue to map the corresponding interrupt to
28123518df57SJiawen Wu  * @param msix_vector
28133518df57SJiawen Wu  *  the vector to map to the corresponding queue
28143518df57SJiawen Wu  */
28153518df57SJiawen Wu void
28163518df57SJiawen Wu ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
28173518df57SJiawen Wu 		   uint8_t queue, uint8_t msix_vector)
28183518df57SJiawen Wu {
28193518df57SJiawen Wu 	uint32_t tmp, idx;
28203518df57SJiawen Wu 
28213518df57SJiawen Wu 	if (direction == -1) {
28223518df57SJiawen Wu 		/* other causes */
28233518df57SJiawen Wu 		msix_vector |= NGBE_IVARMISC_VLD;
28243518df57SJiawen Wu 		idx = 0;
28253518df57SJiawen Wu 		tmp = rd32(hw, NGBE_IVARMISC);
28263518df57SJiawen Wu 		tmp &= ~(0xFF << idx);
28273518df57SJiawen Wu 		tmp |= (msix_vector << idx);
28283518df57SJiawen Wu 		wr32(hw, NGBE_IVARMISC, tmp);
28293518df57SJiawen Wu 	} else {
28303518df57SJiawen Wu 		/* rx or tx causes */
28315547efc7SJiawen Wu 		msix_vector |= NGBE_IVAR_VLD; /* Workaround for ICR lost */
28323518df57SJiawen Wu 		idx = ((16 * (queue & 1)) + (8 * direction));
28333518df57SJiawen Wu 		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
28343518df57SJiawen Wu 		tmp &= ~(0xFF << idx);
28353518df57SJiawen Wu 		tmp |= (msix_vector << idx);
28363518df57SJiawen Wu 		wr32(hw, NGBE_IVAR(queue >> 1), tmp);
28373518df57SJiawen Wu 	}
28383518df57SJiawen Wu }
28393518df57SJiawen Wu 
28403518df57SJiawen Wu /**
28413518df57SJiawen Wu  * Sets up the hardware to properly generate MSI-X interrupts
28423518df57SJiawen Wu  * @param dev
28433518df57SJiawen Wu  *  pointer to rte_eth_dev structure
28443518df57SJiawen Wu  */
28453518df57SJiawen Wu static void
28463518df57SJiawen Wu ngbe_configure_msix(struct rte_eth_dev *dev)
28473518df57SJiawen Wu {
28483518df57SJiawen Wu 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2849d61138d4SHarman Kalra 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
28503518df57SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
28513518df57SJiawen Wu 	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
28523518df57SJiawen Wu 	uint32_t vec = NGBE_MISC_VEC_ID;
28533518df57SJiawen Wu 	uint32_t gpie;
28543518df57SJiawen Wu 
28553518df57SJiawen Wu 	/*
28563518df57SJiawen Wu 	 * Do not configure the MSI-X register if no mapping is done
28573518df57SJiawen Wu 	 * between the interrupt vector and the event fd.
28583518df57SJiawen Wu 	 * However, if MSI-X has already been enabled, auto clean,
28593518df57SJiawen Wu 	 * auto mask and throttling still need to be configured.
28603518df57SJiawen Wu 	 */
28613518df57SJiawen Wu 	gpie = rd32(hw, NGBE_GPIE);
28623518df57SJiawen Wu 	if (!rte_intr_dp_is_en(intr_handle) &&
28633518df57SJiawen Wu 	    !(gpie & NGBE_GPIE_MSIX))
28643518df57SJiawen Wu 		return;
28653518df57SJiawen Wu 
28663518df57SJiawen Wu 	if (rte_intr_allow_others(intr_handle)) {
28673518df57SJiawen Wu 		base = NGBE_RX_VEC_START;
28683518df57SJiawen Wu 		vec = base;
28693518df57SJiawen Wu 	}
28703518df57SJiawen Wu 
28713518df57SJiawen Wu 	/* setup GPIE for MSI-X mode */
28723518df57SJiawen Wu 	gpie = rd32(hw, NGBE_GPIE);
28733518df57SJiawen Wu 	gpie |= NGBE_GPIE_MSIX;
28743518df57SJiawen Wu 	wr32(hw, NGBE_GPIE, gpie);
28753518df57SJiawen Wu 
28763518df57SJiawen Wu 	/* Populate the IVAR table and set the ITR values to the
28773518df57SJiawen Wu 	 * corresponding register.
28783518df57SJiawen Wu 	 */
28793518df57SJiawen Wu 	if (rte_intr_dp_is_en(intr_handle)) {
28803518df57SJiawen Wu 		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
28813518df57SJiawen Wu 			queue_id++) {
28823518df57SJiawen Wu 			/* by default, 1:1 mapping */
28833518df57SJiawen Wu 			ngbe_set_ivar_map(hw, 0, queue_id, vec);
2884d61138d4SHarman Kalra 			rte_intr_vec_list_index_set(intr_handle,
2885d61138d4SHarman Kalra 							   queue_id, vec);
2886d61138d4SHarman Kalra 			if (vec < base + rte_intr_nb_efd_get(intr_handle)
2887d61138d4SHarman Kalra 			    - 1)
28883518df57SJiawen Wu 				vec++;
28893518df57SJiawen Wu 		}
28903518df57SJiawen Wu 
28913518df57SJiawen Wu 		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
28923518df57SJiawen Wu 	}
28933518df57SJiawen Wu 	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
28943518df57SJiawen Wu 			NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
28953518df57SJiawen Wu 			| NGBE_ITR_WRDSA);
28963518df57SJiawen Wu }
28973518df57SJiawen Wu 
2898dee93977SJiawen Wu static u8 *
2899dee93977SJiawen Wu ngbe_dev_addr_list_itr(__rte_unused struct ngbe_hw *hw,
2900dee93977SJiawen Wu 			u8 **mc_addr_ptr, u32 *vmdq)
2901dee93977SJiawen Wu {
2902dee93977SJiawen Wu 	u8 *mc_addr;
2903dee93977SJiawen Wu 
2904dee93977SJiawen Wu 	*vmdq = 0;
2905dee93977SJiawen Wu 	mc_addr = *mc_addr_ptr;
2906dee93977SJiawen Wu 	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
2907dee93977SJiawen Wu 	return mc_addr;
2908dee93977SJiawen Wu }
2909dee93977SJiawen Wu 
2910dee93977SJiawen Wu int
2911dee93977SJiawen Wu ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
2912dee93977SJiawen Wu 			  struct rte_ether_addr *mc_addr_set,
2913dee93977SJiawen Wu 			  uint32_t nb_mc_addr)
2914dee93977SJiawen Wu {
2915dee93977SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
2916dee93977SJiawen Wu 	u8 *mc_addr_list;
2917dee93977SJiawen Wu 
2918dee93977SJiawen Wu 	mc_addr_list = (u8 *)mc_addr_set;
2919dee93977SJiawen Wu 	return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
2920dee93977SJiawen Wu 					 ngbe_dev_addr_list_itr, TRUE);
2921dee93977SJiawen Wu }
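
/*
 * Illustrative sketch (not part of the upstream driver): programming a small
 * multicast allow-list through the generic API routed to
 * ngbe_dev_set_mc_addr_list() above. The helper name and the example
 * addresses are hypothetical.
 */
static __rte_unused int
example_set_mc_list(uint16_t port_id)
{
	struct rte_ether_addr mc[2] = {
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
	};

	/* Passing nb_mc_addr = 0 clears the multicast filter again */
	return rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
}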
2922dee93977SJiawen Wu 
292324cd85f7SJiawen Wu static uint64_t
292424cd85f7SJiawen Wu ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
292524cd85f7SJiawen Wu {
292624cd85f7SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
292724cd85f7SJiawen Wu 	uint64_t systime_cycles;
292824cd85f7SJiawen Wu 
292924cd85f7SJiawen Wu 	systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL);
293024cd85f7SJiawen Wu 	systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32;
293124cd85f7SJiawen Wu 
293224cd85f7SJiawen Wu 	return systime_cycles;
293324cd85f7SJiawen Wu }
293424cd85f7SJiawen Wu 
293524cd85f7SJiawen Wu static uint64_t
293624cd85f7SJiawen Wu ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
293724cd85f7SJiawen Wu {
293824cd85f7SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
293924cd85f7SJiawen Wu 	uint64_t rx_tstamp_cycles;
294024cd85f7SJiawen Wu 
294124cd85f7SJiawen Wu 	/* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
294224cd85f7SJiawen Wu 	rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL);
294324cd85f7SJiawen Wu 	rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32;
294424cd85f7SJiawen Wu 
294524cd85f7SJiawen Wu 	return rx_tstamp_cycles;
294624cd85f7SJiawen Wu }
294724cd85f7SJiawen Wu 
294824cd85f7SJiawen Wu static uint64_t
294924cd85f7SJiawen Wu ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
295024cd85f7SJiawen Wu {
295124cd85f7SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
295224cd85f7SJiawen Wu 	uint64_t tx_tstamp_cycles;
295324cd85f7SJiawen Wu 
295424cd85f7SJiawen Wu 	/* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
295524cd85f7SJiawen Wu 	tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL);
295624cd85f7SJiawen Wu 	tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32;
295724cd85f7SJiawen Wu 
295824cd85f7SJiawen Wu 	return tx_tstamp_cycles;
295924cd85f7SJiawen Wu }
296024cd85f7SJiawen Wu 
296124cd85f7SJiawen Wu static void
296224cd85f7SJiawen Wu ngbe_start_timecounters(struct rte_eth_dev *dev)
296324cd85f7SJiawen Wu {
296424cd85f7SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
296524cd85f7SJiawen Wu 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
296624cd85f7SJiawen Wu 	uint32_t incval = 0;
296724cd85f7SJiawen Wu 	uint32_t shift = 0;
296824cd85f7SJiawen Wu 
296924cd85f7SJiawen Wu 	incval = NGBE_INCVAL_1GB;
297024cd85f7SJiawen Wu 	shift = NGBE_INCVAL_SHIFT_1GB;
297124cd85f7SJiawen Wu 
297224cd85f7SJiawen Wu 	wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval));
297324cd85f7SJiawen Wu 
297424cd85f7SJiawen Wu 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
297524cd85f7SJiawen Wu 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
297624cd85f7SJiawen Wu 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
297724cd85f7SJiawen Wu 
297824cd85f7SJiawen Wu 	adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
297924cd85f7SJiawen Wu 	adapter->systime_tc.cc_shift = shift;
298024cd85f7SJiawen Wu 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
298124cd85f7SJiawen Wu 
298224cd85f7SJiawen Wu 	adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
298324cd85f7SJiawen Wu 	adapter->rx_tstamp_tc.cc_shift = shift;
298424cd85f7SJiawen Wu 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
298524cd85f7SJiawen Wu 
298624cd85f7SJiawen Wu 	adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
298724cd85f7SJiawen Wu 	adapter->tx_tstamp_tc.cc_shift = shift;
298824cd85f7SJiawen Wu 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
298924cd85f7SJiawen Wu }
299024cd85f7SJiawen Wu 
299124cd85f7SJiawen Wu static int
299224cd85f7SJiawen Wu ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
299324cd85f7SJiawen Wu {
299424cd85f7SJiawen Wu 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
299524cd85f7SJiawen Wu 
299624cd85f7SJiawen Wu 	adapter->systime_tc.nsec += delta;
299724cd85f7SJiawen Wu 	adapter->rx_tstamp_tc.nsec += delta;
299824cd85f7SJiawen Wu 	adapter->tx_tstamp_tc.nsec += delta;
299924cd85f7SJiawen Wu 
300024cd85f7SJiawen Wu 	return 0;
300124cd85f7SJiawen Wu }
300224cd85f7SJiawen Wu 
300324cd85f7SJiawen Wu static int
300424cd85f7SJiawen Wu ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
300524cd85f7SJiawen Wu {
300624cd85f7SJiawen Wu 	uint64_t ns;
300724cd85f7SJiawen Wu 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
300824cd85f7SJiawen Wu 
300924cd85f7SJiawen Wu 	ns = rte_timespec_to_ns(ts);
301024cd85f7SJiawen Wu 	/* Set the timecounters to a new value. */
301124cd85f7SJiawen Wu 	adapter->systime_tc.nsec = ns;
301224cd85f7SJiawen Wu 	adapter->rx_tstamp_tc.nsec = ns;
301324cd85f7SJiawen Wu 	adapter->tx_tstamp_tc.nsec = ns;
301424cd85f7SJiawen Wu 
301524cd85f7SJiawen Wu 	return 0;
301624cd85f7SJiawen Wu }
301724cd85f7SJiawen Wu 
301824cd85f7SJiawen Wu static int
301924cd85f7SJiawen Wu ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
302024cd85f7SJiawen Wu {
302124cd85f7SJiawen Wu 	uint64_t ns, systime_cycles;
302224cd85f7SJiawen Wu 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
302324cd85f7SJiawen Wu 
302424cd85f7SJiawen Wu 	systime_cycles = ngbe_read_systime_cyclecounter(dev);
302524cd85f7SJiawen Wu 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
302624cd85f7SJiawen Wu 	*ts = rte_ns_to_timespec(ns);
302724cd85f7SJiawen Wu 
302824cd85f7SJiawen Wu 	return 0;
302924cd85f7SJiawen Wu }
303024cd85f7SJiawen Wu 
303124cd85f7SJiawen Wu static int
303224cd85f7SJiawen Wu ngbe_timesync_enable(struct rte_eth_dev *dev)
303324cd85f7SJiawen Wu {
303424cd85f7SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
303524cd85f7SJiawen Wu 	uint32_t tsync_ctl;
303624cd85f7SJiawen Wu 
303724cd85f7SJiawen Wu 	/* Stop the timesync system time. */
303824cd85f7SJiawen Wu 	wr32(hw, NGBE_TSTIMEINC, 0x0);
303924cd85f7SJiawen Wu 	/* Reset the timesync system time value. */
304024cd85f7SJiawen Wu 	wr32(hw, NGBE_TSTIMEL, 0x0);
304124cd85f7SJiawen Wu 	wr32(hw, NGBE_TSTIMEH, 0x0);
304224cd85f7SJiawen Wu 
304324cd85f7SJiawen Wu 	ngbe_start_timecounters(dev);
304424cd85f7SJiawen Wu 
304524cd85f7SJiawen Wu 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
304624cd85f7SJiawen Wu 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588),
304724cd85f7SJiawen Wu 		RTE_ETHER_TYPE_1588 | NGBE_ETFLT_ENA | NGBE_ETFLT_1588);
304824cd85f7SJiawen Wu 
304924cd85f7SJiawen Wu 	/* Enable timestamping of received PTP packets. */
305024cd85f7SJiawen Wu 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
305124cd85f7SJiawen Wu 	tsync_ctl |= NGBE_TSRXCTL_ENA;
305224cd85f7SJiawen Wu 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
305324cd85f7SJiawen Wu 
305424cd85f7SJiawen Wu 	/* Enable timestamping of transmitted PTP packets. */
305524cd85f7SJiawen Wu 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
305624cd85f7SJiawen Wu 	tsync_ctl |= NGBE_TSTXCTL_ENA;
305724cd85f7SJiawen Wu 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
305824cd85f7SJiawen Wu 
305924cd85f7SJiawen Wu 	ngbe_flush(hw);
306024cd85f7SJiawen Wu 
306124cd85f7SJiawen Wu 	return 0;
306224cd85f7SJiawen Wu }
306324cd85f7SJiawen Wu 
306424cd85f7SJiawen Wu static int
306524cd85f7SJiawen Wu ngbe_timesync_disable(struct rte_eth_dev *dev)
306624cd85f7SJiawen Wu {
306724cd85f7SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
306824cd85f7SJiawen Wu 	uint32_t tsync_ctl;
306924cd85f7SJiawen Wu 
307024cd85f7SJiawen Wu 	/* Disable timestamping of transmitted PTP packets. */
307124cd85f7SJiawen Wu 	tsync_ctl = rd32(hw, NGBE_TSTXCTL);
307224cd85f7SJiawen Wu 	tsync_ctl &= ~NGBE_TSTXCTL_ENA;
307324cd85f7SJiawen Wu 	wr32(hw, NGBE_TSTXCTL, tsync_ctl);
307424cd85f7SJiawen Wu 
307524cd85f7SJiawen Wu 	/* Disable timestamping of received PTP packets. */
307624cd85f7SJiawen Wu 	tsync_ctl = rd32(hw, NGBE_TSRXCTL);
307724cd85f7SJiawen Wu 	tsync_ctl &= ~NGBE_TSRXCTL_ENA;
307824cd85f7SJiawen Wu 	wr32(hw, NGBE_TSRXCTL, tsync_ctl);
307924cd85f7SJiawen Wu 
308024cd85f7SJiawen Wu 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
308124cd85f7SJiawen Wu 	wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);
308224cd85f7SJiawen Wu 
30837be78d02SJosh Soref 	/* Stop incrementing the System Time registers. */
308424cd85f7SJiawen Wu 	wr32(hw, NGBE_TSTIMEINC, 0);
308524cd85f7SJiawen Wu 
308624cd85f7SJiawen Wu 	return 0;
308724cd85f7SJiawen Wu }
308824cd85f7SJiawen Wu 
308924cd85f7SJiawen Wu static int
309024cd85f7SJiawen Wu ngbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
309124cd85f7SJiawen Wu 				 struct timespec *timestamp,
309224cd85f7SJiawen Wu 				 uint32_t flags __rte_unused)
309324cd85f7SJiawen Wu {
309424cd85f7SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
309524cd85f7SJiawen Wu 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
309624cd85f7SJiawen Wu 	uint32_t tsync_rxctl;
309724cd85f7SJiawen Wu 	uint64_t rx_tstamp_cycles;
309824cd85f7SJiawen Wu 	uint64_t ns;
309924cd85f7SJiawen Wu 
310024cd85f7SJiawen Wu 	tsync_rxctl = rd32(hw, NGBE_TSRXCTL);
310124cd85f7SJiawen Wu 	if ((tsync_rxctl & NGBE_TSRXCTL_VLD) == 0)
310224cd85f7SJiawen Wu 		return -EINVAL;
310324cd85f7SJiawen Wu 
310424cd85f7SJiawen Wu 	rx_tstamp_cycles = ngbe_read_rx_tstamp_cyclecounter(dev);
310524cd85f7SJiawen Wu 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
310624cd85f7SJiawen Wu 	*timestamp = rte_ns_to_timespec(ns);
310724cd85f7SJiawen Wu 
310824cd85f7SJiawen Wu 	return  0;
310924cd85f7SJiawen Wu }
311024cd85f7SJiawen Wu 
311124cd85f7SJiawen Wu static int
311224cd85f7SJiawen Wu ngbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
311324cd85f7SJiawen Wu 				 struct timespec *timestamp)
311424cd85f7SJiawen Wu {
311524cd85f7SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
311624cd85f7SJiawen Wu 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
311724cd85f7SJiawen Wu 	uint32_t tsync_txctl;
311824cd85f7SJiawen Wu 	uint64_t tx_tstamp_cycles;
311924cd85f7SJiawen Wu 	uint64_t ns;
312024cd85f7SJiawen Wu 
312124cd85f7SJiawen Wu 	tsync_txctl = rd32(hw, NGBE_TSTXCTL);
312224cd85f7SJiawen Wu 	if ((tsync_txctl & NGBE_TSTXCTL_VLD) == 0)
312324cd85f7SJiawen Wu 		return -EINVAL;
312424cd85f7SJiawen Wu 
312524cd85f7SJiawen Wu 	tx_tstamp_cycles = ngbe_read_tx_tstamp_cyclecounter(dev);
312624cd85f7SJiawen Wu 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
312724cd85f7SJiawen Wu 	*timestamp = rte_ns_to_timespec(ns);
312824cd85f7SJiawen Wu 
312924cd85f7SJiawen Wu 	return 0;
313024cd85f7SJiawen Wu }
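
/*
 * Illustrative sketch (not part of the upstream driver): the typical PTP
 * sequence an application runs against the timesync callbacks above, using
 * only public ethdev calls. The helper name and the 1 us adjustment are
 * examples.
 */
static __rte_unused int
example_read_ptp_clock(uint16_t port_id, struct timespec *ts)
{
	int ret;

	ret = rte_eth_timesync_enable(port_id);
	if (ret != 0)
		return ret;

	/* Read the free-running device time kept by ngbe_timesync_read_time() */
	ret = rte_eth_timesync_read_time(port_id, ts);
	if (ret != 0)
		return ret;

	/* Nudge the clock forward by 1 us, as a PTP servo loop would */
	return rte_eth_timesync_adjust_time(port_id, 1000);
}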
313124cd85f7SJiawen Wu 
31329459ea29SJiawen Wu static int
313371aec127SJiawen Wu ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
313471aec127SJiawen Wu {
313571aec127SJiawen Wu 	int count = 0;
313671aec127SJiawen Wu 	int g_ind = 0;
313771aec127SJiawen Wu 	const struct reg_info *reg_group;
313871aec127SJiawen Wu 	const struct reg_info **reg_set = ngbe_regs_others;
313971aec127SJiawen Wu 
314071aec127SJiawen Wu 	while ((reg_group = reg_set[g_ind++]))
314171aec127SJiawen Wu 		count += ngbe_regs_group_count(reg_group);
314271aec127SJiawen Wu 
314371aec127SJiawen Wu 	return count;
314471aec127SJiawen Wu }
314571aec127SJiawen Wu 
314671aec127SJiawen Wu static int
314771aec127SJiawen Wu ngbe_get_regs(struct rte_eth_dev *dev,
314871aec127SJiawen Wu 	      struct rte_dev_reg_info *regs)
314971aec127SJiawen Wu {
315071aec127SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
315171aec127SJiawen Wu 	uint32_t *data = regs->data;
315271aec127SJiawen Wu 	int g_ind = 0;
315371aec127SJiawen Wu 	int count = 0;
315471aec127SJiawen Wu 	const struct reg_info *reg_group;
315571aec127SJiawen Wu 	const struct reg_info **reg_set = ngbe_regs_others;
315671aec127SJiawen Wu 
315771aec127SJiawen Wu 	if (data == NULL) {
315871aec127SJiawen Wu 		regs->length = ngbe_get_reg_length(dev);
315971aec127SJiawen Wu 		regs->width = sizeof(uint32_t);
316071aec127SJiawen Wu 		return 0;
316171aec127SJiawen Wu 	}
316271aec127SJiawen Wu 
316371aec127SJiawen Wu 	/* Support only full register dump */
316471aec127SJiawen Wu 	if (regs->length == 0 ||
316571aec127SJiawen Wu 	    regs->length == (uint32_t)ngbe_get_reg_length(dev)) {
316671aec127SJiawen Wu 		regs->version = hw->mac.type << 24 |
316771aec127SJiawen Wu 				hw->revision_id << 16 |
316871aec127SJiawen Wu 				hw->device_id;
316971aec127SJiawen Wu 		while ((reg_group = reg_set[g_ind++]))
317071aec127SJiawen Wu 			count += ngbe_read_regs_group(dev, &data[count],
317171aec127SJiawen Wu 						      reg_group);
317271aec127SJiawen Wu 		return 0;
317371aec127SJiawen Wu 	}
317471aec127SJiawen Wu 
317571aec127SJiawen Wu 	return -ENOTSUP;
317671aec127SJiawen Wu }
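
/*
 * Illustrative sketch (not part of the upstream driver): a full register dump
 * through the generic API, first querying the required length with a NULL
 * data pointer exactly as ngbe_get_regs() above expects. The helper name is
 * hypothetical; the caller supplies the buffer.
 */
static __rte_unused int
example_dump_regs(uint16_t port_id, uint32_t *buf, uint32_t buf_len)
{
	struct rte_dev_reg_info info;

	memset(&info, 0, sizeof(info));
	/* With data == NULL only length, width and version are reported */
	if (rte_eth_dev_get_reg_info(port_id, &info) != 0)
		return -EIO;
	if (info.length > buf_len)
		return -EINVAL;

	info.data = buf;
	return rte_eth_dev_get_reg_info(port_id, &info);
}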
317771aec127SJiawen Wu 
317871aec127SJiawen Wu static int
31799459ea29SJiawen Wu ngbe_get_eeprom_length(struct rte_eth_dev *dev)
31809459ea29SJiawen Wu {
31819459ea29SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
31829459ea29SJiawen Wu 
31839459ea29SJiawen Wu 	/* Return unit is byte count */
31849459ea29SJiawen Wu 	return hw->rom.word_size * 2;
31859459ea29SJiawen Wu }
31869459ea29SJiawen Wu 
31879459ea29SJiawen Wu static int
31889459ea29SJiawen Wu ngbe_get_eeprom(struct rte_eth_dev *dev,
31899459ea29SJiawen Wu 		struct rte_dev_eeprom_info *in_eeprom)
31909459ea29SJiawen Wu {
31919459ea29SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
31929459ea29SJiawen Wu 	struct ngbe_rom_info *eeprom = &hw->rom;
31939459ea29SJiawen Wu 	uint16_t *data = in_eeprom->data;
31949459ea29SJiawen Wu 	int first, length;
31959459ea29SJiawen Wu 
31969459ea29SJiawen Wu 	first = in_eeprom->offset >> 1;
31979459ea29SJiawen Wu 	length = in_eeprom->length >> 1;
31989459ea29SJiawen Wu 	if (first > hw->rom.word_size ||
31999459ea29SJiawen Wu 	    ((first + length) > hw->rom.word_size))
32009459ea29SJiawen Wu 		return -EINVAL;
32019459ea29SJiawen Wu 
32029459ea29SJiawen Wu 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
32039459ea29SJiawen Wu 
32049459ea29SJiawen Wu 	return eeprom->readw_buffer(hw, first, length, data);
32059459ea29SJiawen Wu }
32069459ea29SJiawen Wu 
32079459ea29SJiawen Wu static int
32089459ea29SJiawen Wu ngbe_set_eeprom(struct rte_eth_dev *dev,
32099459ea29SJiawen Wu 		struct rte_dev_eeprom_info *in_eeprom)
32109459ea29SJiawen Wu {
32119459ea29SJiawen Wu 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
32129459ea29SJiawen Wu 	struct ngbe_rom_info *eeprom = &hw->rom;
32139459ea29SJiawen Wu 	uint16_t *data = in_eeprom->data;
32149459ea29SJiawen Wu 	int first, length;
32159459ea29SJiawen Wu 
32169459ea29SJiawen Wu 	first = in_eeprom->offset >> 1;
32179459ea29SJiawen Wu 	length = in_eeprom->length >> 1;
32189459ea29SJiawen Wu 	if (first > hw->rom.word_size ||
32199459ea29SJiawen Wu 	    ((first + length) > hw->rom.word_size))
32209459ea29SJiawen Wu 		return -EINVAL;
32219459ea29SJiawen Wu 
32229459ea29SJiawen Wu 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
32239459ea29SJiawen Wu 
32249459ea29SJiawen Wu 	return eeprom->writew_buffer(hw,  first, length, data);
32259459ea29SJiawen Wu }
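
/*
 * Illustrative sketch (not part of the upstream driver): reading the first
 * words of the EEPROM through the generic API routed to ngbe_get_eeprom()
 * above. The helper name is hypothetical; the caller supplies the buffer.
 */
static __rte_unused int
example_read_eeprom(uint16_t port_id, uint16_t *words, uint32_t nb_words)
{
	struct rte_dev_eeprom_info info;
	int total = rte_eth_dev_get_eeprom_length(port_id);

	if (total < 0 || nb_words * 2 > (uint32_t)total)
		return -EINVAL;

	memset(&info, 0, sizeof(info));
	info.offset = 0;		/* byte offset into the EEPROM */
	info.length = nb_words * 2;	/* length is given in bytes */
	info.data = words;

	return rte_eth_dev_get_eeprom(port_id, &info);
}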
32269459ea29SJiawen Wu 
3227b9246b8fSJiawen Wu static const struct eth_dev_ops ngbe_eth_dev_ops = {
3228b9246b8fSJiawen Wu 	.dev_configure              = ngbe_dev_configure,
3229b9246b8fSJiawen Wu 	.dev_infos_get              = ngbe_dev_info_get,
32303518df57SJiawen Wu 	.dev_start                  = ngbe_dev_start,
32313518df57SJiawen Wu 	.dev_stop                   = ngbe_dev_stop,
3232abea8974SJiawen Wu 	.dev_set_link_up            = ngbe_dev_set_link_up,
3233abea8974SJiawen Wu 	.dev_set_link_down          = ngbe_dev_set_link_down,
3234cc63194eSJiawen Wu 	.dev_close                  = ngbe_dev_close,
3235cc63194eSJiawen Wu 	.dev_reset                  = ngbe_dev_reset,
3236b83372a0SJiawen Wu 	.promiscuous_enable         = ngbe_dev_promiscuous_enable,
3237b83372a0SJiawen Wu 	.promiscuous_disable        = ngbe_dev_promiscuous_disable,
3238b83372a0SJiawen Wu 	.allmulticast_enable        = ngbe_dev_allmulticast_enable,
3239b83372a0SJiawen Wu 	.allmulticast_disable       = ngbe_dev_allmulticast_disable,
3240b9246b8fSJiawen Wu 	.link_update                = ngbe_dev_link_update,
3241fdb1e851SJiawen Wu 	.stats_get                  = ngbe_dev_stats_get,
32428b433d04SJiawen Wu 	.xstats_get                 = ngbe_dev_xstats_get,
32438b433d04SJiawen Wu 	.xstats_get_by_id           = ngbe_dev_xstats_get_by_id,
3244fdb1e851SJiawen Wu 	.stats_reset                = ngbe_dev_stats_reset,
32458b433d04SJiawen Wu 	.xstats_reset               = ngbe_dev_xstats_reset,
32468b433d04SJiawen Wu 	.xstats_get_names           = ngbe_dev_xstats_get_names,
32478b433d04SJiawen Wu 	.xstats_get_names_by_id     = ngbe_dev_xstats_get_names_by_id,
3248506abd4aSJiawen Wu 	.fw_version_get             = ngbe_fw_version_get,
3249f6aef1daSJiawen Wu 	.dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
325007baabb6SJiawen Wu 	.mtu_set                    = ngbe_dev_mtu_set,
325159b46438SJiawen Wu 	.vlan_filter_set            = ngbe_vlan_filter_set,
325259b46438SJiawen Wu 	.vlan_tpid_set              = ngbe_vlan_tpid_set,
325359b46438SJiawen Wu 	.vlan_offload_set           = ngbe_vlan_offload_set,
325459b46438SJiawen Wu 	.vlan_strip_queue_set       = ngbe_vlan_strip_queue_set,
325562fc35e6SJiawen Wu 	.rx_queue_start	            = ngbe_dev_rx_queue_start,
325662fc35e6SJiawen Wu 	.rx_queue_stop              = ngbe_dev_rx_queue_stop,
3257001c7823SJiawen Wu 	.tx_queue_start	            = ngbe_dev_tx_queue_start,
3258001c7823SJiawen Wu 	.tx_queue_stop              = ngbe_dev_tx_queue_stop,
325943b7e5eaSJiawen Wu 	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
326043b7e5eaSJiawen Wu 	.rx_queue_release           = ngbe_dev_rx_queue_release,
3261a58e7c31SJiawen Wu 	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
3262a58e7c31SJiawen Wu 	.tx_queue_release           = ngbe_dev_tx_queue_release,
32635547efc7SJiawen Wu 	.rx_queue_intr_enable       = ngbe_dev_rx_queue_intr_enable,
32645547efc7SJiawen Wu 	.rx_queue_intr_disable      = ngbe_dev_rx_queue_intr_disable,
32654db3db29SJiawen Wu 	.dev_led_on                 = ngbe_dev_led_on,
32664db3db29SJiawen Wu 	.dev_led_off                = ngbe_dev_led_off,
3267f40e9f0eSJiawen Wu 	.flow_ctrl_get              = ngbe_flow_ctrl_get,
3268f40e9f0eSJiawen Wu 	.flow_ctrl_set              = ngbe_flow_ctrl_set,
3269dee93977SJiawen Wu 	.mac_addr_add               = ngbe_add_rar,
3270dee93977SJiawen Wu 	.mac_addr_remove            = ngbe_remove_rar,
3271dee93977SJiawen Wu 	.mac_addr_set               = ngbe_set_default_mac_addr,
3272dee93977SJiawen Wu 	.uc_hash_table_set          = ngbe_uc_hash_table_set,
3273dee93977SJiawen Wu 	.uc_all_hash_table_set      = ngbe_uc_all_hash_table_set,
32740779d7f6SJiawen Wu 	.reta_update                = ngbe_dev_rss_reta_update,
32750779d7f6SJiawen Wu 	.reta_query                 = ngbe_dev_rss_reta_query,
32760779d7f6SJiawen Wu 	.rss_hash_update            = ngbe_dev_rss_hash_update,
32770779d7f6SJiawen Wu 	.rss_hash_conf_get          = ngbe_dev_rss_hash_conf_get,
3278dee93977SJiawen Wu 	.set_mc_addr_list           = ngbe_dev_set_mc_addr_list,
3279eec3e736SJiawen Wu 	.rxq_info_get               = ngbe_rxq_info_get,
3280eec3e736SJiawen Wu 	.txq_info_get               = ngbe_txq_info_get,
3281eec3e736SJiawen Wu 	.rx_burst_mode_get          = ngbe_rx_burst_mode_get,
3282eec3e736SJiawen Wu 	.tx_burst_mode_get          = ngbe_tx_burst_mode_get,
328324cd85f7SJiawen Wu 	.timesync_enable            = ngbe_timesync_enable,
328424cd85f7SJiawen Wu 	.timesync_disable           = ngbe_timesync_disable,
328524cd85f7SJiawen Wu 	.timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp,
328624cd85f7SJiawen Wu 	.timesync_read_tx_timestamp = ngbe_timesync_read_tx_timestamp,
328771aec127SJiawen Wu 	.get_reg                    = ngbe_get_regs,
32889459ea29SJiawen Wu 	.get_eeprom_length          = ngbe_get_eeprom_length,
32899459ea29SJiawen Wu 	.get_eeprom                 = ngbe_get_eeprom,
32909459ea29SJiawen Wu 	.set_eeprom                 = ngbe_set_eeprom,
329124cd85f7SJiawen Wu 	.timesync_adjust_time       = ngbe_timesync_adjust_time,
329224cd85f7SJiawen Wu 	.timesync_read_time         = ngbe_timesync_read_time,
329324cd85f7SJiawen Wu 	.timesync_write_time        = ngbe_timesync_write_time,
3294d0759b50SJiawen Wu 	.tx_done_cleanup            = ngbe_dev_tx_done_cleanup,
3295b9246b8fSJiawen Wu };
3296b9246b8fSJiawen Wu 
329726590b52SJiawen Wu RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
32986ee7e574SJiawen Wu RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
32996ee7e574SJiawen Wu RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");
3300cc934df1SJiawen Wu 
3301cc934df1SJiawen Wu RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
3302cc934df1SJiawen Wu RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);
3303cc934df1SJiawen Wu 
3304cc934df1SJiawen Wu #ifdef RTE_ETHDEV_DEBUG_RX
3305cc934df1SJiawen Wu 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
3306cc934df1SJiawen Wu #endif
3307cc934df1SJiawen Wu #ifdef RTE_ETHDEV_DEBUG_TX
3308cc934df1SJiawen Wu 	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
3309cc934df1SJiawen Wu #endif
3310