xref: /dpdk/drivers/net/axgbe/axgbe_dev.c (revision 47cf4ac19e2aa94ac1f33c3bec7b2e4f04171680)
1572890efSRavi Kumar /*   SPDX-License-Identifier: BSD-3-Clause
2572890efSRavi Kumar  *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
3572890efSRavi Kumar  *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
4572890efSRavi Kumar  */
5572890efSRavi Kumar 
6572890efSRavi Kumar #include "axgbe_ethdev.h"
7572890efSRavi Kumar #include "axgbe_common.h"
8572890efSRavi Kumar #include "axgbe_phy.h"
97c4158a5SRavi Kumar #include "axgbe_rxtx.h"
107c4158a5SRavi Kumar 
/* Return x with its 32 bits in reverse order (bit 0 <-> bit 31). */
static uint32_t bitrev32(uint32_t x)
{
	uint32_t out = 0;
	unsigned int i;

	for (i = 0; i < 32; i++) {
		out = (out << 1) | (x & 1);
		x >>= 1;
	}
	return out;
}
2086578516SGirish Nandibasappa 
/*
 * Return the 1-based position (1..32) of the most significant set bit
 * of x, or 0 when x is 0.  Equivalent to the Linux kernel's fls().
 */
static int get_lastbit_set(int x)
{
	/* Work on an unsigned copy: left-shifting a signed int so that a
	 * one reaches (or passes) the sign bit is undefined behavior in C.
	 */
	uint32_t v = (uint32_t)x;
	int r = 32;

	if (!v)
		return 0;
	if (!(v & 0xffff0000u)) {
		v <<= 16;
		r -= 16;
	}
	if (!(v & 0xff000000u)) {
		v <<= 8;
		r -= 8;
	}
	if (!(v & 0xf0000000u)) {
		v <<= 4;
		r -= 4;
	}
	if (!(v & 0xc0000000u)) {
		v <<= 2;
		r -= 2;
	}
	if (!(v & 0x80000000u))
		r -= 1;
	return r;
}
5086578516SGirish Nandibasappa 
/* Maximum on-wire frame size for the current MTU: the configured MTU plus
 * the Ethernet header, CRC, and room for a single VLAN tag.
 */
static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
{
	return pdata->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
		RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN;
}
56572890efSRavi Kumar 
574ac7516bSRavi Kumar /* query busy bit */
584ac7516bSRavi Kumar static int mdio_complete(struct axgbe_port *pdata)
594ac7516bSRavi Kumar {
604ac7516bSRavi Kumar 	if (!AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, BUSY))
614ac7516bSRavi Kumar 		return 1;
624ac7516bSRavi Kumar 
634ac7516bSRavi Kumar 	return 0;
644ac7516bSRavi Kumar }
654ac7516bSRavi Kumar 
66627ab524SVenkat Kumar Ande static unsigned int axgbe_create_mdio_sca_c22(int port, int reg)
67d06394d2SVenkat Kumar Ande {
68627ab524SVenkat Kumar Ande 	unsigned int mdio_sca;
69d06394d2SVenkat Kumar Ande 
70627ab524SVenkat Kumar Ande 	mdio_sca = 0;
71627ab524SVenkat Kumar Ande 	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
72627ab524SVenkat Kumar Ande 	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
73627ab524SVenkat Kumar Ande 
74627ab524SVenkat Kumar Ande 	return mdio_sca;
75627ab524SVenkat Kumar Ande }
76627ab524SVenkat Kumar Ande 
77627ab524SVenkat Kumar Ande static unsigned int axgbe_create_mdio_sca_c45(int port, unsigned int da, int reg)
78627ab524SVenkat Kumar Ande {
79627ab524SVenkat Kumar Ande 	unsigned int mdio_sca;
80d06394d2SVenkat Kumar Ande 
81d06394d2SVenkat Kumar Ande 	mdio_sca = 0;
82d06394d2SVenkat Kumar Ande 	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
83d06394d2SVenkat Kumar Ande 	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
84d06394d2SVenkat Kumar Ande 	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
85d06394d2SVenkat Kumar Ande 
86d06394d2SVenkat Kumar Ande 	return mdio_sca;
87d06394d2SVenkat Kumar Ande }
88d06394d2SVenkat Kumar Ande 
/*
 * Issue an external MII register write through the MDIO single-command
 * interface.  mdio_sca must already be composed (see
 * axgbe_create_mdio_sca_c22()/_c45()).  Returns 0 on completion or
 * -ETIMEDOUT after roughly one second without BUSY clearing.
 */
static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata,
						unsigned int mdio_sca, u16 val)
{
	unsigned int mdio_sccd;
	uint64_t timeout;

	/* Address phase: latch the composed station/register address */
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	/* Data phase: CMD = 1 (write); setting BUSY starts the operation */
	mdio_sccd = 0;
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	/* Poll for completion for up to one timer second */
	timeout = rte_get_timer_cycles() + rte_get_timer_hz();
	while (time_before(rte_get_timer_cycles(), timeout)) {
		rte_delay_us(100);
		if (mdio_complete(pdata))
			return 0;
	}

	PMD_DRV_LOG(ERR, "Mdio write operation timed out\n");
	return -ETIMEDOUT;
}
1134ac7516bSRavi Kumar 
114627ab524SVenkat Kumar Ande 
115627ab524SVenkat Kumar Ande static int axgbe_write_ext_mii_regs_c22(struct axgbe_port *pdata,
116627ab524SVenkat Kumar Ande 							int addr, int reg, u16 val)
1174ac7516bSRavi Kumar {
118627ab524SVenkat Kumar Ande 	unsigned int mdio_sca;
119627ab524SVenkat Kumar Ande 
120627ab524SVenkat Kumar Ande 	mdio_sca = axgbe_create_mdio_sca_c22(addr, reg);
121627ab524SVenkat Kumar Ande 
122627ab524SVenkat Kumar Ande 	return axgbe_write_ext_mii_regs(pdata, mdio_sca, val);
123627ab524SVenkat Kumar Ande }
124627ab524SVenkat Kumar Ande 
125627ab524SVenkat Kumar Ande static int axgbe_write_ext_mii_regs_c45(struct axgbe_port *pdata,
126627ab524SVenkat Kumar Ande 					int addr, int devad, int reg, u16 val)
127627ab524SVenkat Kumar Ande {
128627ab524SVenkat Kumar Ande 	unsigned int mdio_sca;
129627ab524SVenkat Kumar Ande 
130627ab524SVenkat Kumar Ande 	mdio_sca = axgbe_create_mdio_sca_c45(addr, devad, reg);
131627ab524SVenkat Kumar Ande 
132627ab524SVenkat Kumar Ande 	return axgbe_write_ext_mii_regs(pdata, mdio_sca, val);
133627ab524SVenkat Kumar Ande }
134627ab524SVenkat Kumar Ande 
135627ab524SVenkat Kumar Ande 
/*
 * Issue an external MII register read through the MDIO single-command
 * interface.  Returns the 16-bit register data on success (non-negative)
 * or -ETIMEDOUT after roughly one second without BUSY clearing.
 */
static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata,
							unsigned int mdio_sca)
{
	unsigned int mdio_sccd;
	uint64_t timeout;

	/* Address phase: latch the composed station/register address */
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	/* Command phase: CMD = 3 (read); setting BUSY starts the operation */
	mdio_sccd = 0;
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	/* Poll for completion for up to one timer second */
	timeout = rte_get_timer_cycles() + rte_get_timer_hz();

	while (time_before(rte_get_timer_cycles(), timeout)) {
		rte_delay_us(100);
		if (mdio_complete(pdata))
			goto success;
	}

	PMD_DRV_LOG(ERR, "Mdio read operation timed out\n");
	return -ETIMEDOUT;

success:
	/* The read data is returned in the DATA field of the same register */
	return AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
}
1634ac7516bSRavi Kumar 
/* Clause-22 MDIO read: compose the address word, then issue the read. */
static int axgbe_read_ext_mii_regs_c22(struct axgbe_port *pdata, int addr, int reg)
{
	return axgbe_read_ext_mii_regs(pdata,
				       axgbe_create_mdio_sca_c22(addr, reg));
}
172627ab524SVenkat Kumar Ande 
/* Clause-45 MDIO read: compose the address word, then issue the read. */
static int axgbe_read_ext_mii_regs_c45(struct axgbe_port *pdata, int addr,
								int devad, int reg)
{
	return axgbe_read_ext_mii_regs(pdata,
				       axgbe_create_mdio_sca_c45(addr, devad, reg));
}
182627ab524SVenkat Kumar Ande 
1834ac7516bSRavi Kumar static int axgbe_set_ext_mii_mode(struct axgbe_port *pdata, unsigned int port,
1844ac7516bSRavi Kumar 				  enum axgbe_mdio_mode mode)
1854ac7516bSRavi Kumar {
1864ac7516bSRavi Kumar 	unsigned int reg_val = 0;
1874ac7516bSRavi Kumar 
1884ac7516bSRavi Kumar 	switch (mode) {
1894ac7516bSRavi Kumar 	case AXGBE_MDIO_MODE_CL22:
1904ac7516bSRavi Kumar 		if (port > AXGMAC_MAX_C22_PORT)
1914ac7516bSRavi Kumar 			return -EINVAL;
1924ac7516bSRavi Kumar 		reg_val |= (1 << port);
1934ac7516bSRavi Kumar 		break;
1944ac7516bSRavi Kumar 	case AXGBE_MDIO_MODE_CL45:
1954ac7516bSRavi Kumar 		break;
1964ac7516bSRavi Kumar 	default:
1974ac7516bSRavi Kumar 		return -EINVAL;
1984ac7516bSRavi Kumar 	}
1994ac7516bSRavi Kumar 	AXGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);
2004ac7516bSRavi Kumar 
2014ac7516bSRavi Kumar 	return 0;
2024ac7516bSRavi Kumar }
2034ac7516bSRavi Kumar 
/*
 * Read an MMD register using the V2 (mmio indirect window) access method.
 * prtad is unused; when mmd_reg carries the AXGBE_ADDR_C45 flag the device
 * address is already encoded in it, otherwise pdata->mdio_mmd selects the MMD.
 */
static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata,
				  int prtad __rte_unused, int mmd_reg)
{
	unsigned int mmd_address, index, offset;
	int mmd_data;

	/* Clause-45 register numbers already embed the device address */
	if (mmd_reg & AXGBE_ADDR_C45)
		mmd_address = mmd_reg & ~AXGBE_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and reading 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	/* Serialize window selection + data read against other accessors */
	pthread_mutex_lock(&pdata->xpcs_mutex);

	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	mmd_data = XPCS16_IOREAD(pdata, offset);

	pthread_mutex_unlock(&pdata->xpcs_mutex);

	return mmd_data;
}
2374ac7516bSRavi Kumar 
/*
 * Write an MMD register using the V2 (mmio indirect window) access method.
 * prtad is unused; when mmd_reg carries the AXGBE_ADDR_C45 flag the device
 * address is already encoded in it, otherwise pdata->mdio_mmd selects the MMD.
 */
static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata,
				    int prtad __rte_unused,
				    int mmd_reg, int mmd_data)
{
	unsigned int mmd_address, index, offset;

	/* Clause-45 register numbers already embed the device address */
	if (mmd_reg & AXGBE_ADDR_C45)
		mmd_address = mmd_reg & ~AXGBE_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and writing 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	/* Serialize window selection + data write against other accessors */
	pthread_mutex_lock(&pdata->xpcs_mutex);

	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	XPCS16_IOWRITE(pdata, offset, mmd_data);

	pthread_mutex_unlock(&pdata->xpcs_mutex);
}
2694ac7516bSRavi Kumar 
2704ac7516bSRavi Kumar static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
2714ac7516bSRavi Kumar 			       int mmd_reg)
2724ac7516bSRavi Kumar {
2734ac7516bSRavi Kumar 	switch (pdata->vdata->xpcs_access) {
2744ac7516bSRavi Kumar 	case AXGBE_XPCS_ACCESS_V1:
2754ac7516bSRavi Kumar 		PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
2764ac7516bSRavi Kumar 		return -1;
2774ac7516bSRavi Kumar 	case AXGBE_XPCS_ACCESS_V2:
2784ac7516bSRavi Kumar 	default:
2794ac7516bSRavi Kumar 		return axgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
2804ac7516bSRavi Kumar 	}
2814ac7516bSRavi Kumar }
2824ac7516bSRavi Kumar 
2834ac7516bSRavi Kumar static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad,
2844ac7516bSRavi Kumar 				 int mmd_reg, int mmd_data)
2854ac7516bSRavi Kumar {
2864ac7516bSRavi Kumar 	switch (pdata->vdata->xpcs_access) {
2874ac7516bSRavi Kumar 	case AXGBE_XPCS_ACCESS_V1:
2884ac7516bSRavi Kumar 		PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
2894ac7516bSRavi Kumar 		return;
2904ac7516bSRavi Kumar 	case AXGBE_XPCS_ACCESS_V2:
2914ac7516bSRavi Kumar 	default:
2924ac7516bSRavi Kumar 		return axgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
2934ac7516bSRavi Kumar 	}
2944ac7516bSRavi Kumar }
2954ac7516bSRavi Kumar 
296a5c72737SRavi Kumar static int axgbe_set_speed(struct axgbe_port *pdata, int speed)
297a5c72737SRavi Kumar {
298a5c72737SRavi Kumar 	unsigned int ss;
299a5c72737SRavi Kumar 
300a5c72737SRavi Kumar 	switch (speed) {
3011f9d2d3aSVenkat Kumar Ande 	case SPEED_10:
3021f9d2d3aSVenkat Kumar Ande 		ss = 0x07;
3031f9d2d3aSVenkat Kumar Ande 		break;
304a5c72737SRavi Kumar 	case SPEED_1000:
305a5c72737SRavi Kumar 		ss = 0x03;
306a5c72737SRavi Kumar 		break;
307a5c72737SRavi Kumar 	case SPEED_2500:
308a5c72737SRavi Kumar 		ss = 0x02;
309a5c72737SRavi Kumar 		break;
310a5c72737SRavi Kumar 	case SPEED_10000:
311a5c72737SRavi Kumar 		ss = 0x00;
312a5c72737SRavi Kumar 		break;
313a5c72737SRavi Kumar 	default:
314a5c72737SRavi Kumar 		return -EINVAL;
315a5c72737SRavi Kumar 	}
316a5c72737SRavi Kumar 
317a5c72737SRavi Kumar 	if (AXGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
318a5c72737SRavi Kumar 		AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);
319a5c72737SRavi Kumar 
320a5c72737SRavi Kumar 	return 0;
321a5c72737SRavi Kumar }
322a5c72737SRavi Kumar 
323b4b24f3eSVenkat Kumar Ande static unsigned int axgbe_get_fc_queue_count(struct axgbe_port *pdata)
324b4b24f3eSVenkat Kumar Ande {
325b4b24f3eSVenkat Kumar Ande 	unsigned int max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
326b4b24f3eSVenkat Kumar Ande 
327b4b24f3eSVenkat Kumar Ande 	/* From MAC ver 30H the TFCR is per priority, instead of per queue */
328b4b24f3eSVenkat Kumar Ande 	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
329b4b24f3eSVenkat Kumar Ande 		return max_q_count;
330b4b24f3eSVenkat Kumar Ande 	else
331b4b24f3eSVenkat Kumar Ande 		return (RTE_MIN(pdata->tx_q_count, max_q_count));
332b4b24f3eSVenkat Kumar Ande }
333b4b24f3eSVenkat Kumar Ande 
3347c4158a5SRavi Kumar static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)
3357c4158a5SRavi Kumar {
3367c4158a5SRavi Kumar 	unsigned int reg, reg_val;
337b4b24f3eSVenkat Kumar Ande 	unsigned int i, q_count;
3387c4158a5SRavi Kumar 
3397c4158a5SRavi Kumar 	/* Clear MTL flow control */
3407c4158a5SRavi Kumar 	for (i = 0; i < pdata->rx_q_count; i++)
3417c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
3427c4158a5SRavi Kumar 
3437c4158a5SRavi Kumar 	/* Clear MAC flow control */
344b4b24f3eSVenkat Kumar Ande 	q_count = axgbe_get_fc_queue_count(pdata);
3457c4158a5SRavi Kumar 	reg = MAC_Q0TFCR;
3467c4158a5SRavi Kumar 	for (i = 0; i < q_count; i++) {
3477c4158a5SRavi Kumar 		reg_val = AXGMAC_IOREAD(pdata, reg);
3487c4158a5SRavi Kumar 		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
3497c4158a5SRavi Kumar 		AXGMAC_IOWRITE(pdata, reg, reg_val);
3507c4158a5SRavi Kumar 
3517c4158a5SRavi Kumar 		reg += MAC_QTFCR_INC;
3527c4158a5SRavi Kumar 	}
3537c4158a5SRavi Kumar 
3547c4158a5SRavi Kumar 	return 0;
3557c4158a5SRavi Kumar }
3567c4158a5SRavi Kumar 
3577c4158a5SRavi Kumar static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
3587c4158a5SRavi Kumar {
3597c4158a5SRavi Kumar 	unsigned int reg, reg_val;
360b4b24f3eSVenkat Kumar Ande 	unsigned int i, q_count;
3617c4158a5SRavi Kumar 
3627c4158a5SRavi Kumar 	/* Set MTL flow control */
3637c4158a5SRavi Kumar 	for (i = 0; i < pdata->rx_q_count; i++) {
3647c4158a5SRavi Kumar 		unsigned int ehfc = 0;
3657c4158a5SRavi Kumar 
3667c4158a5SRavi Kumar 		/* Flow control thresholds are established */
3677c4158a5SRavi Kumar 		if (pdata->rx_rfd[i])
3687c4158a5SRavi Kumar 			ehfc = 1;
3697c4158a5SRavi Kumar 
3707c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
3714216cdc0SChandu Babu N 
3724216cdc0SChandu Babu N 		PMD_DRV_LOG(DEBUG, "flow control %s for RXq%u\n",
3734216cdc0SChandu Babu N 			    ehfc ? "enabled" : "disabled", i);
3747c4158a5SRavi Kumar 	}
3757c4158a5SRavi Kumar 
3767c4158a5SRavi Kumar 	/* Set MAC flow control */
377b4b24f3eSVenkat Kumar Ande 	q_count = axgbe_get_fc_queue_count(pdata);
3787c4158a5SRavi Kumar 	reg = MAC_Q0TFCR;
3797c4158a5SRavi Kumar 	for (i = 0; i < q_count; i++) {
3807c4158a5SRavi Kumar 		reg_val = AXGMAC_IOREAD(pdata, reg);
3817c4158a5SRavi Kumar 
3827c4158a5SRavi Kumar 		/* Enable transmit flow control */
3837c4158a5SRavi Kumar 		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
3847c4158a5SRavi Kumar 		/* Set pause time */
3857c4158a5SRavi Kumar 		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
3867c4158a5SRavi Kumar 
3877c4158a5SRavi Kumar 		AXGMAC_IOWRITE(pdata, reg, reg_val);
3887c4158a5SRavi Kumar 
3897c4158a5SRavi Kumar 		reg += MAC_QTFCR_INC;
3907c4158a5SRavi Kumar 	}
3917c4158a5SRavi Kumar 
3927c4158a5SRavi Kumar 	return 0;
3937c4158a5SRavi Kumar }
3947c4158a5SRavi Kumar 
/* Stop honoring received pause frames (clear MAC_RFCR.RFE). */
static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return 0;
}
4017c4158a5SRavi Kumar 
/* Start honoring received pause frames (set MAC_RFCR.RFE). */
static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return 0;
}
4087c4158a5SRavi Kumar 
4097c4158a5SRavi Kumar static int axgbe_config_tx_flow_control(struct axgbe_port *pdata)
4107c4158a5SRavi Kumar {
4117c4158a5SRavi Kumar 	if (pdata->tx_pause)
4127c4158a5SRavi Kumar 		axgbe_enable_tx_flow_control(pdata);
4137c4158a5SRavi Kumar 	else
4147c4158a5SRavi Kumar 		axgbe_disable_tx_flow_control(pdata);
4157c4158a5SRavi Kumar 
4167c4158a5SRavi Kumar 	return 0;
4177c4158a5SRavi Kumar }
4187c4158a5SRavi Kumar 
4197c4158a5SRavi Kumar static int axgbe_config_rx_flow_control(struct axgbe_port *pdata)
4207c4158a5SRavi Kumar {
4217c4158a5SRavi Kumar 	if (pdata->rx_pause)
4227c4158a5SRavi Kumar 		axgbe_enable_rx_flow_control(pdata);
4237c4158a5SRavi Kumar 	else
4247c4158a5SRavi Kumar 		axgbe_disable_rx_flow_control(pdata);
4257c4158a5SRavi Kumar 
4267c4158a5SRavi Kumar 	return 0;
4277c4158a5SRavi Kumar }
4287c4158a5SRavi Kumar 
/* Apply the current Tx/Rx pause configuration and disable priority-based
 * flow control (PFCE).
 */
static void axgbe_config_flow_control(struct axgbe_port *pdata)
{
	axgbe_config_tx_flow_control(pdata);
	axgbe_config_rx_flow_control(pdata);

	/* Priority-based (per-traffic-class) flow control is not used */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
}
4367c4158a5SRavi Kumar 
/*
 * Derive the RFA (flow-control activate) and RFD (deactivate) thresholds
 * for one Rx queue from its FIFO size and the current max frame size,
 * storing the encoded values in pdata->rx_rfa[]/rx_rfd[].  An rx_rfd of
 * zero signals "no flow control" for this queue (see
 * axgbe_enable_tx_flow_control()).
 */
static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata,
					       unsigned int queue,
					       unsigned int q_fifo_size)
{
	unsigned int frame_fifo_size;
	unsigned int rfa, rfd;

	frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata));

	/* This path deals with just maximum frame sizes which are
	 * limited to a jumbo frame of 9,000 (plus headers, etc.)
	 * so we can never exceed the maximum allowable RFA/RFD
	 * values.
	 */
	if (q_fifo_size <= 2048) {
		/* rx_rfd to zero to signal no flow control */
		pdata->rx_rfa[queue] = 0;
		pdata->rx_rfd[queue] = 0;
		return;
	}

	if (q_fifo_size <= 4096) {
		/* Between 2048 and 4096 */
		pdata->rx_rfa[queue] = 0;	/* Full - 1024 bytes */
		pdata->rx_rfd[queue] = 1;	/* Full - 1536 bytes */
		return;
	}

	if (q_fifo_size <= frame_fifo_size) {
		/* Between 4096 and max-frame */
		pdata->rx_rfa[queue] = 2;	/* Full - 2048 bytes */
		pdata->rx_rfd[queue] = 5;	/* Full - 3584 bytes */
		return;
	}

	if (q_fifo_size <= (frame_fifo_size * 3)) {
		/* Between max-frame and 3 max-frames,
		 * trigger if we get just over a frame of data and
		 * resume when we have just under half a frame left.
		 */
		rfa = q_fifo_size - frame_fifo_size;
		rfd = rfa + (frame_fifo_size / 2);
	} else {
		/* Above 3 max-frames - trigger when just over
		 * 2 frames of space available
		 */
		rfa = frame_fifo_size * 2;
		rfa += AXGMAC_FLOW_CONTROL_UNIT;
		rfd = rfa + frame_fifo_size;
	}

	/* Encode the byte counts into the register representation */
	pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa);
	pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd);
}
4917c4158a5SRavi Kumar 
4927c4158a5SRavi Kumar static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata)
4937c4158a5SRavi Kumar {
4947c4158a5SRavi Kumar 	unsigned int q_fifo_size;
4957c4158a5SRavi Kumar 	unsigned int i;
4967c4158a5SRavi Kumar 
4977c4158a5SRavi Kumar 	for (i = 0; i < pdata->rx_q_count; i++) {
4987c4158a5SRavi Kumar 		q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT;
4997c4158a5SRavi Kumar 
5007c4158a5SRavi Kumar 		axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
5017c4158a5SRavi Kumar 	}
5027c4158a5SRavi Kumar }
5037c4158a5SRavi Kumar 
5047c4158a5SRavi Kumar static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata)
5057c4158a5SRavi Kumar {
5067c4158a5SRavi Kumar 	unsigned int i;
5077c4158a5SRavi Kumar 
5087c4158a5SRavi Kumar 	for (i = 0; i < pdata->rx_q_count; i++) {
5097c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
5107c4158a5SRavi Kumar 					pdata->rx_rfa[i]);
5117c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
5127c4158a5SRavi Kumar 					pdata->rx_rfd[i]);
5137c4158a5SRavi Kumar 	}
5147c4158a5SRavi Kumar }
5157c4158a5SRavi Kumar 
/* Configure the MAC to strip C-TAG VLAN headers into the Rx descriptor. */
static int axgbe_enable_rx_vlan_stripping(struct axgbe_port *pdata)
{
	/* Put the VLAN tag in the Rx descriptor */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

	/* Don't check the VLAN type */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

	/* Check only C-TAG (0x8100) packets */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

	/* Enable VLAN tag stripping */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
	return 0;
}
53486578516SGirish Nandibasappa 
/* Stop stripping VLAN tags on receive (leave the tag in the packet). */
static int axgbe_disable_rx_vlan_stripping(struct axgbe_port *pdata)
{
	/* EVLS = 0: do not strip the VLAN tag */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
	return 0;
}
54086578516SGirish Nandibasappa 
/* Enable VLAN hash-table filtering on receive (see
 * axgbe_update_vlan_hash_table() for hash generation).
 */
static int axgbe_enable_rx_vlan_filtering(struct axgbe_port *pdata)
{
	/* Enable VLAN filtering */
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

	/* Enable VLAN Hash Table filtering */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

	/* Disable VLAN tag inverse matching */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

	/* Only filter on the lower 12-bits of the VLAN tag */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

	/* In order for the VLAN Hash Table filtering to be effective,
	 * the VLAN tag identifier in the VLAN Tag Register must not
	 * be zero.  Set the VLAN tag identifier to "1" to enable the
	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
	 * 1 will always pass filtering.
	 */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
	return 0;
}
56486578516SGirish Nandibasappa 
/* Turn off Rx VLAN filtering entirely (all VLANs pass). */
static int axgbe_disable_rx_vlan_filtering(struct axgbe_port *pdata)
{
	/* Disable VLAN filtering */
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
	return 0;
}
57186578516SGirish Nandibasappa 
57286578516SGirish Nandibasappa static u32 axgbe_vid_crc32_le(__le16 vid_le)
57386578516SGirish Nandibasappa {
57486578516SGirish Nandibasappa 	u32 poly = 0xedb88320;  /* CRCPOLY_LE */
57586578516SGirish Nandibasappa 	u32 crc = ~0;
57686578516SGirish Nandibasappa 	u32 temp = 0;
57786578516SGirish Nandibasappa 	unsigned char *data = (unsigned char *)&vid_le;
57886578516SGirish Nandibasappa 	unsigned char data_byte = 0;
57986578516SGirish Nandibasappa 	int i, bits;
58086578516SGirish Nandibasappa 
58186578516SGirish Nandibasappa 	bits = get_lastbit_set(VLAN_VID_MASK);
58286578516SGirish Nandibasappa 	for (i = 0; i < bits; i++) {
58386578516SGirish Nandibasappa 		if ((i % 8) == 0)
58486578516SGirish Nandibasappa 			data_byte = data[i / 8];
58586578516SGirish Nandibasappa 
58686578516SGirish Nandibasappa 		temp = ((crc & 1) ^ data_byte) & 1;
58786578516SGirish Nandibasappa 		crc >>= 1;
58886578516SGirish Nandibasappa 		data_byte >>= 1;
58986578516SGirish Nandibasappa 
59086578516SGirish Nandibasappa 		if (temp)
59186578516SGirish Nandibasappa 			crc ^= poly;
59286578516SGirish Nandibasappa 	}
59386578516SGirish Nandibasappa 	return crc;
59486578516SGirish Nandibasappa }
59586578516SGirish Nandibasappa 
/*
 * Regenerate the 16-bit VLAN hash filter from pdata->active_vlans and
 * program it into MAC_VLANHTR.  Each active VID contributes one bit,
 * selected by the top 4 bits of the bit-reversed CRC-32 of the VID.
 */
static int axgbe_update_vlan_hash_table(struct axgbe_port *pdata)
{
	u32 crc = 0;
	u16 vid;
	__le16 vid_le = 0;
	u16 vlan_hash_table = 0;
	unsigned int reg = 0;
	unsigned long vid_idx, vid_valid;

	/* Generate the VLAN Hash Table value */
	for (vid = 0; vid < VLAN_N_VID; vid++) {
		/* active_vlans is a bitmap of 64-bit words; extract this
		 * VID's bit and skip VIDs that are not active.
		 */
		vid_idx = VLAN_TABLE_IDX(vid);
		vid_valid = pdata->active_vlans[vid_idx];
		vid_valid = (unsigned long)vid_valid >> (vid - (64 * vid_idx));
		if (vid_valid & 1)
			PMD_DRV_LOG(DEBUG,
				    "vid:%d pdata->active_vlans[%ld]=0x%lx\n",
				    vid, vid_idx, pdata->active_vlans[vid_idx]);
		else
			continue;

		/* Hash = top 4 bits of the bit-reversed CRC-32 of the VID */
		vid_le = rte_cpu_to_le_16(vid);
		crc = bitrev32(~axgbe_vid_crc32_le(vid_le)) >> 28;
		vlan_hash_table |= (1 << crc);
		PMD_DRV_LOG(DEBUG, "crc = %d vlan_hash_table = 0x%x\n",
			    crc, vlan_hash_table);
	}
	/* Set the VLAN Hash Table filtering register */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
	reg = AXGMAC_IOREAD(pdata, MAC_VLANHTR);
	PMD_DRV_LOG(DEBUG, "vlan_hash_table reg val = 0x%x\n", reg);
	return 0;
}
62986578516SGirish Nandibasappa 
630572890efSRavi Kumar static int __axgbe_exit(struct axgbe_port *pdata)
631572890efSRavi Kumar {
632572890efSRavi Kumar 	unsigned int count = 2000;
633572890efSRavi Kumar 
634572890efSRavi Kumar 	/* Issue a software reset */
635572890efSRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
636572890efSRavi Kumar 	rte_delay_us(10);
637572890efSRavi Kumar 
638572890efSRavi Kumar 	/* Poll Until Poll Condition */
639572890efSRavi Kumar 	while (--count && AXGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
640572890efSRavi Kumar 		rte_delay_us(500);
641572890efSRavi Kumar 
642572890efSRavi Kumar 	if (!count)
643572890efSRavi Kumar 		return -EBUSY;
644572890efSRavi Kumar 
645572890efSRavi Kumar 	return 0;
646572890efSRavi Kumar }
647572890efSRavi Kumar 
/* Reset the device on exit.  The reset is issued twice to guard against
 * possible incorrectly generated interrupts from the first reset.
 * Returns 0 on success or the error from the failing reset.
 */
static int axgbe_exit(struct axgbe_port *pdata)
{
	int rc;

	rc = __axgbe_exit(pdata);

	return rc ? rc : __axgbe_exit(pdata);
}
661572890efSRavi Kumar 
6627c4158a5SRavi Kumar static int axgbe_flush_tx_queues(struct axgbe_port *pdata)
6637c4158a5SRavi Kumar {
6647c4158a5SRavi Kumar 	unsigned int i, count;
6657c4158a5SRavi Kumar 
6667c4158a5SRavi Kumar 	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
6677c4158a5SRavi Kumar 		return 0;
6687c4158a5SRavi Kumar 
6697c4158a5SRavi Kumar 	for (i = 0; i < pdata->tx_q_count; i++)
6707c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
6717c4158a5SRavi Kumar 
6727c4158a5SRavi Kumar 	/* Poll Until Poll Condition */
6737c4158a5SRavi Kumar 	for (i = 0; i < pdata->tx_q_count; i++) {
6747c4158a5SRavi Kumar 		count = 2000;
6757c4158a5SRavi Kumar 		while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,
6767c4158a5SRavi Kumar 							 MTL_Q_TQOMR, FTQ))
6777c4158a5SRavi Kumar 			rte_delay_us(500);
6787c4158a5SRavi Kumar 
6797c4158a5SRavi Kumar 		if (!count)
6807c4158a5SRavi Kumar 			return -EBUSY;
6817c4158a5SRavi Kumar 	}
6827c4158a5SRavi Kumar 
6837c4158a5SRavi Kumar 	return 0;
6847c4158a5SRavi Kumar }
6857c4158a5SRavi Kumar 
/* Program the DMA system-bus (AXI) interface configuration. */
static void axgbe_config_dma_bus(struct axgbe_port *pdata)
{
	/* Set enhanced addressing mode (wide DMA addresses) */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);

	/* Maximum outstanding read/write requests (0x3f = 64 each) */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);

	/* Set the System Bus mode: UNDEF burst lengths allowed,
	 * 32-beat bursts enabled, address-aligned beats enabled.
	 */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);
}
7007c4158a5SRavi Kumar 
/* Program the AXI cache attributes used for DMA reads and writes.
 * All fields are set to 0xf; the field names suggest per-traffic-type
 * controls (descriptor/header/data read and write caches) —
 * NOTE(review): exact bit semantics come from the AXI/register spec,
 * not visible here.
 */
static void axgbe_config_dma_cache(struct axgbe_port *pdata)
{
	unsigned int arcache, awcache, arwcache;

	/* AXI read-channel cache attributes */
	arcache = 0;
	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0xf);
	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, 0xf);
	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, 0xf);
	AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);

	/* AXI write-channel cache attributes */
	awcache = 0;
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0xf);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0xf);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0xf);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0xf);
	AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);

	/* AXI combined read/write cache attributes */
	arwcache = 0;
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0xf);
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0xf);
	AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
}
7237c4158a5SRavi Kumar 
/* Program the eDMA Tx/Rx control registers.
 * NOTE(review): 0x5 is a magic value inherited from hardware bring-up;
 * individual bit meanings are defined in the hardware spec, not here.
 */
static void axgbe_config_edma_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);
	AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);
}
7297c4158a5SRavi Kumar 
7307c4158a5SRavi Kumar static int axgbe_config_osp_mode(struct axgbe_port *pdata)
7317c4158a5SRavi Kumar {
7327c4158a5SRavi Kumar 	/* Force DMA to operate on second packet before closing descriptors
7337c4158a5SRavi Kumar 	 *  of first packet
7347c4158a5SRavi Kumar 	 */
7357c4158a5SRavi Kumar 	struct axgbe_tx_queue *txq;
7367c4158a5SRavi Kumar 	unsigned int i;
7377c4158a5SRavi Kumar 
7387c4158a5SRavi Kumar 	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
7397c4158a5SRavi Kumar 		txq = pdata->eth_dev->data->tx_queues[i];
7407c4158a5SRavi Kumar 		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,
7417c4158a5SRavi Kumar 					pdata->tx_osp_mode);
7427c4158a5SRavi Kumar 	}
7437c4158a5SRavi Kumar 
7447c4158a5SRavi Kumar 	return 0;
7457c4158a5SRavi Kumar }
7467c4158a5SRavi Kumar 
7477c4158a5SRavi Kumar static int axgbe_config_pblx8(struct axgbe_port *pdata)
7487c4158a5SRavi Kumar {
7497c4158a5SRavi Kumar 	struct axgbe_tx_queue *txq;
7507c4158a5SRavi Kumar 	unsigned int i;
7517c4158a5SRavi Kumar 
7527c4158a5SRavi Kumar 	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
7537c4158a5SRavi Kumar 		txq = pdata->eth_dev->data->tx_queues[i];
7547c4158a5SRavi Kumar 		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,
7557c4158a5SRavi Kumar 					pdata->pblx8);
7567c4158a5SRavi Kumar 	}
7577c4158a5SRavi Kumar 	return 0;
7587c4158a5SRavi Kumar }
7597c4158a5SRavi Kumar 
7607c4158a5SRavi Kumar static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)
7617c4158a5SRavi Kumar {
7627c4158a5SRavi Kumar 	struct axgbe_tx_queue *txq;
7637c4158a5SRavi Kumar 	unsigned int i;
7647c4158a5SRavi Kumar 
7657c4158a5SRavi Kumar 	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
7667c4158a5SRavi Kumar 		txq = pdata->eth_dev->data->tx_queues[i];
7677c4158a5SRavi Kumar 		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,
7687c4158a5SRavi Kumar 				pdata->tx_pbl);
7697c4158a5SRavi Kumar 	}
7707c4158a5SRavi Kumar 
7717c4158a5SRavi Kumar 	return 0;
7727c4158a5SRavi Kumar }
7737c4158a5SRavi Kumar 
7747c4158a5SRavi Kumar static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)
7757c4158a5SRavi Kumar {
7767c4158a5SRavi Kumar 	struct axgbe_rx_queue *rxq;
7777c4158a5SRavi Kumar 	unsigned int i;
7787c4158a5SRavi Kumar 
7797c4158a5SRavi Kumar 	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
7807c4158a5SRavi Kumar 		rxq = pdata->eth_dev->data->rx_queues[i];
7817c4158a5SRavi Kumar 		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL,
7827c4158a5SRavi Kumar 				pdata->rx_pbl);
7837c4158a5SRavi Kumar 	}
7847c4158a5SRavi Kumar 
7857c4158a5SRavi Kumar 	return 0;
7867c4158a5SRavi Kumar }
7877c4158a5SRavi Kumar 
7887c4158a5SRavi Kumar static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata)
7897c4158a5SRavi Kumar {
7907c4158a5SRavi Kumar 	struct axgbe_rx_queue *rxq;
7917c4158a5SRavi Kumar 	unsigned int i;
7927c4158a5SRavi Kumar 
7937c4158a5SRavi Kumar 	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
7947c4158a5SRavi Kumar 		rxq = pdata->eth_dev->data->rx_queues[i];
7957c4158a5SRavi Kumar 
7967c4158a5SRavi Kumar 		rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
7977c4158a5SRavi Kumar 			RTE_PKTMBUF_HEADROOM;
7987c4158a5SRavi Kumar 		rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) &
7997c4158a5SRavi Kumar 			~(AXGBE_RX_BUF_ALIGN - 1);
8007c4158a5SRavi Kumar 
8017c4158a5SRavi Kumar 		if (rxq->buf_size > pdata->rx_buf_size)
8027c4158a5SRavi Kumar 			pdata->rx_buf_size = rxq->buf_size;
8037c4158a5SRavi Kumar 
8047c4158a5SRavi Kumar 		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ,
8057c4158a5SRavi Kumar 					rxq->buf_size);
8067c4158a5SRavi Kumar 	}
8077c4158a5SRavi Kumar }
8087c4158a5SRavi Kumar 
8097c4158a5SRavi Kumar static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type,
8107c4158a5SRavi Kumar 			       unsigned int index, unsigned int val)
8117c4158a5SRavi Kumar {
8127c4158a5SRavi Kumar 	unsigned int wait;
8137c4158a5SRavi Kumar 
8147c4158a5SRavi Kumar 	if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
8157c4158a5SRavi Kumar 		return -EBUSY;
8167c4158a5SRavi Kumar 
8177c4158a5SRavi Kumar 	AXGMAC_IOWRITE(pdata, MAC_RSSDR, val);
8187c4158a5SRavi Kumar 
8197c4158a5SRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
8207c4158a5SRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
8217c4158a5SRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
8227c4158a5SRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
8237c4158a5SRavi Kumar 
8247c4158a5SRavi Kumar 	wait = 1000;
8257c4158a5SRavi Kumar 	while (wait--) {
8267c4158a5SRavi Kumar 		if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
8277c4158a5SRavi Kumar 			return 0;
8287c4158a5SRavi Kumar 
8297c4158a5SRavi Kumar 		rte_delay_us(1500);
8307c4158a5SRavi Kumar 	}
8317c4158a5SRavi Kumar 
8327c4158a5SRavi Kumar 	return -EBUSY;
8337c4158a5SRavi Kumar }
8347c4158a5SRavi Kumar 
83576d7664dSChandu Babu N int axgbe_write_rss_hash_key(struct axgbe_port *pdata)
8367c4158a5SRavi Kumar {
8377c4158a5SRavi Kumar 	struct rte_eth_rss_conf *rss_conf;
8387c4158a5SRavi Kumar 	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
8397c4158a5SRavi Kumar 	unsigned int *key;
8407c4158a5SRavi Kumar 	int ret;
8417c4158a5SRavi Kumar 
8427c4158a5SRavi Kumar 	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
8437c4158a5SRavi Kumar 
8447c4158a5SRavi Kumar 	if (!rss_conf->rss_key)
8457c4158a5SRavi Kumar 		key = (unsigned int *)&pdata->rss_key;
8467c4158a5SRavi Kumar 	else
8477c4158a5SRavi Kumar 		key = (unsigned int *)&rss_conf->rss_key;
8487c4158a5SRavi Kumar 
8497c4158a5SRavi Kumar 	while (key_regs--) {
8507c4158a5SRavi Kumar 		ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE,
8517c4158a5SRavi Kumar 					  key_regs, *key++);
8527c4158a5SRavi Kumar 		if (ret)
8537c4158a5SRavi Kumar 			return ret;
8547c4158a5SRavi Kumar 	}
8557c4158a5SRavi Kumar 
8567c4158a5SRavi Kumar 	return 0;
8577c4158a5SRavi Kumar }
8587c4158a5SRavi Kumar 
85976d7664dSChandu Babu N int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
8607c4158a5SRavi Kumar {
8617c4158a5SRavi Kumar 	unsigned int i;
8627c4158a5SRavi Kumar 	int ret;
8637c4158a5SRavi Kumar 
8647c4158a5SRavi Kumar 	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
8657c4158a5SRavi Kumar 		ret = axgbe_write_rss_reg(pdata,
8667c4158a5SRavi Kumar 					  AXGBE_RSS_LOOKUP_TABLE_TYPE, i,
8677c4158a5SRavi Kumar 					  pdata->rss_table[i]);
8687c4158a5SRavi Kumar 		if (ret)
8697c4158a5SRavi Kumar 			return ret;
8707c4158a5SRavi Kumar 	}
8717c4158a5SRavi Kumar 
8727c4158a5SRavi Kumar 	return 0;
8737c4158a5SRavi Kumar }
8747c4158a5SRavi Kumar 
8757c4158a5SRavi Kumar static int axgbe_enable_rss(struct axgbe_port *pdata)
8767c4158a5SRavi Kumar {
8777c4158a5SRavi Kumar 	int ret;
8787c4158a5SRavi Kumar 
8797c4158a5SRavi Kumar 	/* Program the hash key */
8807c4158a5SRavi Kumar 	ret = axgbe_write_rss_hash_key(pdata);
8817c4158a5SRavi Kumar 	if (ret)
8827c4158a5SRavi Kumar 		return ret;
8837c4158a5SRavi Kumar 
8847c4158a5SRavi Kumar 	/* Program the lookup table */
8857c4158a5SRavi Kumar 	ret = axgbe_write_rss_lookup_table(pdata);
8867c4158a5SRavi Kumar 	if (ret)
8877c4158a5SRavi Kumar 		return ret;
8887c4158a5SRavi Kumar 
8897c4158a5SRavi Kumar 	/* Set the RSS options */
8907c4158a5SRavi Kumar 	AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
8917c4158a5SRavi Kumar 
8927c4158a5SRavi Kumar 	/* Enable RSS */
8937c4158a5SRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
8947c4158a5SRavi Kumar 
8957c4158a5SRavi Kumar 	return 0;
8967c4158a5SRavi Kumar }
8977c4158a5SRavi Kumar 
8987c4158a5SRavi Kumar static void axgbe_rss_options(struct axgbe_port *pdata)
8997c4158a5SRavi Kumar {
9007c4158a5SRavi Kumar 	struct rte_eth_rss_conf *rss_conf;
9017c4158a5SRavi Kumar 	uint64_t rss_hf;
9027c4158a5SRavi Kumar 
9037c4158a5SRavi Kumar 	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
90476d7664dSChandu Babu N 	pdata->rss_hf = rss_conf->rss_hf;
9057c4158a5SRavi Kumar 	rss_hf = rss_conf->rss_hf;
9067c4158a5SRavi Kumar 
907295968d1SFerruh Yigit 	if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
9087c4158a5SRavi Kumar 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
909295968d1SFerruh Yigit 	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
9107c4158a5SRavi Kumar 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
911295968d1SFerruh Yigit 	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
9127c4158a5SRavi Kumar 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
9137c4158a5SRavi Kumar }
9147c4158a5SRavi Kumar 
9157c4158a5SRavi Kumar static int axgbe_config_rss(struct axgbe_port *pdata)
9167c4158a5SRavi Kumar {
9177c4158a5SRavi Kumar 	uint32_t i;
9187c4158a5SRavi Kumar 
9197c4158a5SRavi Kumar 	if (pdata->rss_enable) {
9207c4158a5SRavi Kumar 		/* Initialize RSS hash key and lookup table */
9217c4158a5SRavi Kumar 		uint32_t *key = (uint32_t *)pdata->rss_key;
9227c4158a5SRavi Kumar 
9237c4158a5SRavi Kumar 		for (i = 0; i < sizeof(pdata->rss_key) / 4; i++)
9247c4158a5SRavi Kumar 			*key++ = (uint32_t)rte_rand();
9257c4158a5SRavi Kumar 		for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++)
9267c4158a5SRavi Kumar 			AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
9277c4158a5SRavi Kumar 					i % pdata->eth_dev->data->nb_rx_queues);
9287c4158a5SRavi Kumar 		axgbe_rss_options(pdata);
9297c4158a5SRavi Kumar 		if (axgbe_enable_rss(pdata)) {
9307c4158a5SRavi Kumar 			PMD_DRV_LOG(ERR, "Error in enabling RSS support\n");
9317c4158a5SRavi Kumar 			return -1;
9327c4158a5SRavi Kumar 		}
9337c4158a5SRavi Kumar 	} else {
9347c4158a5SRavi Kumar 		AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
9357c4158a5SRavi Kumar 	}
9367c4158a5SRavi Kumar 
9377c4158a5SRavi Kumar 	return 0;
9387c4158a5SRavi Kumar }
9397c4158a5SRavi Kumar 
/* Acknowledge all pending per-channel DMA interrupts and program the
 * interrupt-enable mask on each Tx queue's DMA channel.  Only the
 * abnormal-summary and fatal-bus-error interrupts are enabled; the
 * normal summary (NIE) and Rx-buffer-unavailable (RBUE) enables are
 * explicitly written as 0.
 */
static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int dma_ch_isr, dma_ch_ier;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];

		/* Clear all the interrupts which are set (write-1-to-clear) */
		dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR);
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr);

		/* Clear all interrupt enable bits */
		dma_ch_ier = 0;

		/* Summary/error enables:
		 *   NIE  - Normal Interrupt Summary: deliberately disabled
		 *   AIE  - Abnormal Interrupt Summary Enable
		 *   FBEE - Fatal Bus Error Enable
		 */
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0);
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);

		/* RBUE (Receive Buffer Unavailable) is deliberately left
		 * disabled here.  NOTE(review): Rx completion interrupt
		 * enables appear to be handled elsewhere (per-channel
		 * edge-triggered mode) — confirm against the Rx setup path.
		 */
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);

		AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier);
	}
}
9767c4158a5SRavi Kumar 
9777c4158a5SRavi Kumar static void wrapper_tx_desc_init(struct axgbe_port *pdata)
9787c4158a5SRavi Kumar {
9797c4158a5SRavi Kumar 	struct axgbe_tx_queue *txq;
9807c4158a5SRavi Kumar 	unsigned int i;
9817c4158a5SRavi Kumar 
9827c4158a5SRavi Kumar 	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
9837c4158a5SRavi Kumar 		txq = pdata->eth_dev->data->tx_queues[i];
9847c4158a5SRavi Kumar 		txq->cur = 0;
9857c4158a5SRavi Kumar 		txq->dirty = 0;
9867c4158a5SRavi Kumar 		/* Update the total number of Tx descriptors */
9877c4158a5SRavi Kumar 		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1);
9887c4158a5SRavi Kumar 		/* Update the starting address of descriptor ring */
9897c4158a5SRavi Kumar 		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI,
9907c4158a5SRavi Kumar 					high32_value(txq->ring_phys_addr));
9917c4158a5SRavi Kumar 		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO,
9927c4158a5SRavi Kumar 					low32_value(txq->ring_phys_addr));
9937c4158a5SRavi Kumar 	}
9947c4158a5SRavi Kumar }
9957c4158a5SRavi Kumar 
/* Allocate one mbuf per Rx descriptor for every Rx queue, point each
 * descriptor at its mbuf, hand descriptor ownership to the hardware, and
 * program the ring length/base/tail registers.
 * Returns 0 on success, -ENOMEM if an mbuf allocation fails.
 */
static int wrapper_rx_desc_init(struct axgbe_port *pdata)
{
	struct axgbe_rx_queue *rxq;
	struct rte_mbuf *mbuf;
	volatile union axgbe_rx_desc *desc;
	unsigned int i, j;

	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
		rxq = pdata->eth_dev->data->rx_queues[i];

		/* Initialize software ring entries */
		rxq->mbuf_alloc = 0;
		rxq->cur = 0;
		rxq->dirty = 0;
		desc = AXGBE_GET_DESC_PT(rxq, 0);

		for (j = 0; j < rxq->nb_desc; j++) {
			mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
			if (mbuf == NULL) {
				PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n",
					    (unsigned int)rxq->queue_id, j);
				/* NOTE(review): only the current queue is
				 * released here; queues < i keep their
				 * already-attached mbufs — presumably freed
				 * by the caller's failure path, confirm.
				 */
				axgbe_dev_rx_queue_release(pdata->eth_dev, i);
				return -ENOMEM;
			}
			rxq->sw_ring[j] = mbuf;
			/* Mbuf populate */
			mbuf->next = NULL;
			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
			mbuf->nb_segs = 1;
			mbuf->port = rxq->port_id;
			/* Point the descriptor at the mbuf data buffer */
			desc->read.baddr =
				rte_cpu_to_le_64(
					rte_mbuf_data_iova_default(mbuf));
			/* Barrier: buffer address must be visible to the
			 * device before the OWN bit is set.
			 */
			rte_wmb();
			AXGMAC_SET_BITS_LE(desc->read.desc3,
						RX_NORMAL_DESC3, OWN, 1);
			rte_wmb();
			rxq->mbuf_alloc++;
			desc++;
		}
		/* Update the total number of Rx descriptors (count - 1) */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR,
					rxq->nb_desc - 1);
		/* Update the starting address of descriptor ring */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI,
					high32_value(rxq->ring_phys_addr));
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO,
					low32_value(rxq->ring_phys_addr));
		/* Update the Rx Descriptor Tail Pointer (last descriptor) */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
				   (rxq->nb_desc - 1) *
				   sizeof(union axgbe_rx_desc)));
	}
	return 0;
}
10527c4158a5SRavi Kumar 
10537c4158a5SRavi Kumar static void axgbe_config_mtl_mode(struct axgbe_port *pdata)
10547c4158a5SRavi Kumar {
10557c4158a5SRavi Kumar 	unsigned int i;
10567c4158a5SRavi Kumar 
10577c4158a5SRavi Kumar 	/* Set Tx to weighted round robin scheduling algorithm */
10587c4158a5SRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
10597c4158a5SRavi Kumar 
10607c4158a5SRavi Kumar 	/* Set Tx traffic classes to use WRR algorithm with equal weights */
10617c4158a5SRavi Kumar 	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
10627c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
10637c4158a5SRavi Kumar 				MTL_TSA_ETS);
10647c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
10657c4158a5SRavi Kumar 	}
10667c4158a5SRavi Kumar 
10677c4158a5SRavi Kumar 	/* Set Rx to strict priority algorithm */
10687c4158a5SRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
10697c4158a5SRavi Kumar }
10707c4158a5SRavi Kumar 
10717c4158a5SRavi Kumar static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val)
10727c4158a5SRavi Kumar {
10737c4158a5SRavi Kumar 	unsigned int i;
10747c4158a5SRavi Kumar 
10757c4158a5SRavi Kumar 	for (i = 0; i < pdata->tx_q_count; i++)
10767c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
10777c4158a5SRavi Kumar 
10787c4158a5SRavi Kumar 	return 0;
10797c4158a5SRavi Kumar }
10807c4158a5SRavi Kumar 
10817c4158a5SRavi Kumar static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val)
10827c4158a5SRavi Kumar {
10837c4158a5SRavi Kumar 	unsigned int i;
10847c4158a5SRavi Kumar 
10857c4158a5SRavi Kumar 	for (i = 0; i < pdata->rx_q_count; i++)
10867c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
10877c4158a5SRavi Kumar 
10887c4158a5SRavi Kumar 	return 0;
10897c4158a5SRavi Kumar }
10907c4158a5SRavi Kumar 
10917c4158a5SRavi Kumar static int axgbe_config_tx_threshold(struct axgbe_port *pdata,
10927c4158a5SRavi Kumar 				     unsigned int val)
10937c4158a5SRavi Kumar {
10947c4158a5SRavi Kumar 	unsigned int i;
10957c4158a5SRavi Kumar 
10967c4158a5SRavi Kumar 	for (i = 0; i < pdata->tx_q_count; i++)
10977c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
10987c4158a5SRavi Kumar 
10997c4158a5SRavi Kumar 	return 0;
11007c4158a5SRavi Kumar }
11017c4158a5SRavi Kumar 
11027c4158a5SRavi Kumar static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
11037c4158a5SRavi Kumar 				     unsigned int val)
11047c4158a5SRavi Kumar {
11057c4158a5SRavi Kumar 	unsigned int i;
11067c4158a5SRavi Kumar 
11077c4158a5SRavi Kumar 	for (i = 0; i < pdata->rx_q_count; i++)
11087c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
11097c4158a5SRavi Kumar 
11107c4158a5SRavi Kumar 	return 0;
11117c4158a5SRavi Kumar }
11127c4158a5SRavi Kumar 
11137be78d02SJosh Soref /* Distributing FIFO size */
11147c4158a5SRavi Kumar static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
11157c4158a5SRavi Kumar {
11167c4158a5SRavi Kumar 	unsigned int fifo_size;
11177c4158a5SRavi Kumar 	unsigned int q_fifo_size;
11187c4158a5SRavi Kumar 	unsigned int p_fifo, i;
11197c4158a5SRavi Kumar 
11207c4158a5SRavi Kumar 	fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
11217c4158a5SRavi Kumar 			  pdata->hw_feat.rx_fifo_size);
11227c4158a5SRavi Kumar 	q_fifo_size = fifo_size / pdata->rx_q_count;
11237c4158a5SRavi Kumar 
11247c4158a5SRavi Kumar 	/* Calculate the fifo setting by dividing the queue's fifo size
11257c4158a5SRavi Kumar 	 * by the fifo allocation increment (with 0 representing the
11267c4158a5SRavi Kumar 	 * base allocation increment so decrement the result
11277c4158a5SRavi Kumar 	 * by 1).
11287c4158a5SRavi Kumar 	 */
11297c4158a5SRavi Kumar 	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
11307c4158a5SRavi Kumar 	if (p_fifo)
11317c4158a5SRavi Kumar 		p_fifo--;
11327c4158a5SRavi Kumar 
11337c4158a5SRavi Kumar 	for (i = 0; i < pdata->rx_q_count; i++)
11347c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo);
11357c4158a5SRavi Kumar 	pdata->fifo = p_fifo;
11367c4158a5SRavi Kumar 
11377c4158a5SRavi Kumar 	/*Calculate and config Flow control threshold*/
11387c4158a5SRavi Kumar 	axgbe_calculate_flow_control_threshold(pdata);
11397c4158a5SRavi Kumar 	axgbe_config_flow_control_threshold(pdata);
11404216cdc0SChandu Babu N 
11414216cdc0SChandu Babu N 	PMD_DRV_LOG(DEBUG, "%d Rx hardware queues, %d byte fifo per queue\n",
11424216cdc0SChandu Babu N 		    pdata->rx_q_count, q_fifo_size);
11437c4158a5SRavi Kumar }
11447c4158a5SRavi Kumar 
11457c4158a5SRavi Kumar static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)
11467c4158a5SRavi Kumar {
11477c4158a5SRavi Kumar 	unsigned int fifo_size;
11487c4158a5SRavi Kumar 	unsigned int q_fifo_size;
11497c4158a5SRavi Kumar 	unsigned int p_fifo, i;
11507c4158a5SRavi Kumar 
11517c4158a5SRavi Kumar 	fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
11527c4158a5SRavi Kumar 				pdata->hw_feat.tx_fifo_size);
11537c4158a5SRavi Kumar 	q_fifo_size = fifo_size / pdata->tx_q_count;
11547c4158a5SRavi Kumar 
11557c4158a5SRavi Kumar 	/* Calculate the fifo setting by dividing the queue's fifo size
11567c4158a5SRavi Kumar 	 * by the fifo allocation increment (with 0 representing the
11577c4158a5SRavi Kumar 	 * base allocation increment so decrement the result
11587c4158a5SRavi Kumar 	 * by 1).
11597c4158a5SRavi Kumar 	 */
11607c4158a5SRavi Kumar 	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
11617c4158a5SRavi Kumar 	if (p_fifo)
11627c4158a5SRavi Kumar 		p_fifo--;
11637c4158a5SRavi Kumar 
11647c4158a5SRavi Kumar 	for (i = 0; i < pdata->tx_q_count; i++)
11657c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);
11664216cdc0SChandu Babu N 
11674216cdc0SChandu Babu N 	PMD_DRV_LOG(DEBUG, "%d Tx hardware queues, %d byte fifo per queue\n",
11684216cdc0SChandu Babu N 		    pdata->tx_q_count, q_fifo_size);
11697c4158a5SRavi Kumar }
11707c4158a5SRavi Kumar 
11717c4158a5SRavi Kumar static void axgbe_config_queue_mapping(struct axgbe_port *pdata)
11727c4158a5SRavi Kumar {
11737c4158a5SRavi Kumar 	unsigned int qptc, qptc_extra, queue;
11747c4158a5SRavi Kumar 	unsigned int i, j, reg, reg_val;
11757c4158a5SRavi Kumar 
11767c4158a5SRavi Kumar 	/* Map the MTL Tx Queues to Traffic Classes
11777c4158a5SRavi Kumar 	 *   Note: Tx Queues >= Traffic Classes
11787c4158a5SRavi Kumar 	 */
11797c4158a5SRavi Kumar 	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
11807c4158a5SRavi Kumar 	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
11817c4158a5SRavi Kumar 
11827c4158a5SRavi Kumar 	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
11834216cdc0SChandu Babu N 		for (j = 0; j < qptc; j++) {
11844216cdc0SChandu Babu N 			PMD_DRV_LOG(DEBUG, "TXq%u mapped to TC%u\n", queue, i);
11857c4158a5SRavi Kumar 			AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
11867c4158a5SRavi Kumar 						Q2TCMAP, i);
11874216cdc0SChandu Babu N 		}
11884216cdc0SChandu Babu N 		if (i < qptc_extra) {
11894216cdc0SChandu Babu N 			PMD_DRV_LOG(DEBUG, "TXq%u mapped to TC%u\n", queue, i);
11907c4158a5SRavi Kumar 			AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
11917c4158a5SRavi Kumar 						Q2TCMAP, i);
11927c4158a5SRavi Kumar 		}
11934216cdc0SChandu Babu N 	}
11947c4158a5SRavi Kumar 
11957c4158a5SRavi Kumar 	if (pdata->rss_enable) {
11967c4158a5SRavi Kumar 		/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
11977c4158a5SRavi Kumar 		reg = MTL_RQDCM0R;
11987c4158a5SRavi Kumar 		reg_val = 0;
11997c4158a5SRavi Kumar 		for (i = 0; i < pdata->rx_q_count;) {
12007c4158a5SRavi Kumar 			reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
12017c4158a5SRavi Kumar 
12027c4158a5SRavi Kumar 			if ((i % MTL_RQDCM_Q_PER_REG) &&
12037c4158a5SRavi Kumar 			    (i != pdata->rx_q_count))
12047c4158a5SRavi Kumar 				continue;
12057c4158a5SRavi Kumar 
12067c4158a5SRavi Kumar 			AXGMAC_IOWRITE(pdata, reg, reg_val);
12077c4158a5SRavi Kumar 
12087c4158a5SRavi Kumar 			reg += MTL_RQDCM_INC;
12097c4158a5SRavi Kumar 			reg_val = 0;
12107c4158a5SRavi Kumar 		}
12117c4158a5SRavi Kumar 	}
12127c4158a5SRavi Kumar }
12137c4158a5SRavi Kumar 
12147c4158a5SRavi Kumar static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)
12157c4158a5SRavi Kumar {
12167c4158a5SRavi Kumar 	unsigned int mtl_q_isr;
12177c4158a5SRavi Kumar 	unsigned int q_count, i;
12187c4158a5SRavi Kumar 
12197c4158a5SRavi Kumar 	q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
12207c4158a5SRavi Kumar 	for (i = 0; i < q_count; i++) {
12217c4158a5SRavi Kumar 		/* Clear all the interrupts which are set */
12227c4158a5SRavi Kumar 		mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
12237c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
12247c4158a5SRavi Kumar 
12257c4158a5SRavi Kumar 		/* No MTL interrupts to be enabled */
12267c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
12277c4158a5SRavi Kumar 	}
12287c4158a5SRavi Kumar }
12297c4158a5SRavi Kumar 
/* Bitwise little-endian (reflected) CRC-32 over 'len' bytes at 'p' using
 * the IEEE 802.3 polynomial 0xedb88320.  'crc' is the running value (seed
 * with ~0 for a fresh computation); no final inversion is applied here.
 */
static uint32_t crc32_le(uint32_t crc, uint8_t *p, uint32_t len)
{
	uint32_t byte_idx, bit;

	for (byte_idx = 0; byte_idx < len; byte_idx++) {
		crc ^= p[byte_idx];
		for (bit = 0; bit < 8; bit++) {
			uint32_t lsb = crc & 1;

			crc >>= 1;
			if (lsb)
				crc ^= 0xedb88320;
		}
	}
	return crc;
}
1240e01d9b2eSChandu Babu N 
1241e01d9b2eSChandu Babu N void axgbe_set_mac_hash_table(struct axgbe_port *pdata, u8 *addr, bool add)
1242e01d9b2eSChandu Babu N {
1243e01d9b2eSChandu Babu N 	uint32_t crc, htable_index, htable_bitmask;
1244e01d9b2eSChandu Babu N 
1245e01d9b2eSChandu Babu N 	crc = bitrev32(~crc32_le(~0, addr, RTE_ETHER_ADDR_LEN));
1246e01d9b2eSChandu Babu N 	crc >>= pdata->hash_table_shift;
1247e01d9b2eSChandu Babu N 	htable_index = crc >> 5;
1248e01d9b2eSChandu Babu N 	htable_bitmask = 1 << (crc & 0x1f);
1249e01d9b2eSChandu Babu N 
1250e01d9b2eSChandu Babu N 	if (add) {
1251e01d9b2eSChandu Babu N 		pdata->uc_hash_table[htable_index] |= htable_bitmask;
1252e01d9b2eSChandu Babu N 		pdata->uc_hash_mac_addr++;
1253e01d9b2eSChandu Babu N 	} else {
1254e01d9b2eSChandu Babu N 		pdata->uc_hash_table[htable_index] &= ~htable_bitmask;
1255e01d9b2eSChandu Babu N 		pdata->uc_hash_mac_addr--;
1256e01d9b2eSChandu Babu N 	}
1257e01d9b2eSChandu Babu N 	PMD_DRV_LOG(DEBUG, "%s MAC hash table Bit %d at Index %#x\n",
1258e01d9b2eSChandu Babu N 		    add ? "set" : "clear", (crc & 0x1f), htable_index);
1259e01d9b2eSChandu Babu N 
1260e01d9b2eSChandu Babu N 	AXGMAC_IOWRITE(pdata, MAC_HTR(htable_index),
1261e01d9b2eSChandu Babu N 		       pdata->uc_hash_table[htable_index]);
1262e01d9b2eSChandu Babu N }
1263e01d9b2eSChandu Babu N 
126449a5e622SChandu Babu N void axgbe_set_mac_addn_addr(struct axgbe_port *pdata, u8 *addr, uint32_t index)
126549a5e622SChandu Babu N {
126649a5e622SChandu Babu N 	unsigned int mac_addr_hi, mac_addr_lo;
126749a5e622SChandu Babu N 	u8 *mac_addr;
126849a5e622SChandu Babu N 
126949a5e622SChandu Babu N 	mac_addr_lo = 0;
127049a5e622SChandu Babu N 	mac_addr_hi = 0;
127149a5e622SChandu Babu N 
127249a5e622SChandu Babu N 	if (addr) {
127349a5e622SChandu Babu N 		mac_addr = (u8 *)&mac_addr_lo;
127449a5e622SChandu Babu N 		mac_addr[0] = addr[0];
127549a5e622SChandu Babu N 		mac_addr[1] = addr[1];
127649a5e622SChandu Babu N 		mac_addr[2] = addr[2];
127749a5e622SChandu Babu N 		mac_addr[3] = addr[3];
127849a5e622SChandu Babu N 		mac_addr = (u8 *)&mac_addr_hi;
127949a5e622SChandu Babu N 		mac_addr[0] = addr[4];
128049a5e622SChandu Babu N 		mac_addr[1] = addr[5];
128149a5e622SChandu Babu N 
128249a5e622SChandu Babu N 		/*Address Enable: Use this Addr for Perfect Filtering */
128349a5e622SChandu Babu N 		AXGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
128449a5e622SChandu Babu N 	}
128549a5e622SChandu Babu N 
128649a5e622SChandu Babu N 	PMD_DRV_LOG(DEBUG, "%s mac address at %#x\n",
128749a5e622SChandu Babu N 		    addr ? "set" : "clear", index);
128849a5e622SChandu Babu N 
128949a5e622SChandu Babu N 	AXGMAC_IOWRITE(pdata, MAC_MACAHR(index), mac_addr_hi);
129049a5e622SChandu Babu N 	AXGMAC_IOWRITE(pdata, MAC_MACALR(index), mac_addr_lo);
129149a5e622SChandu Babu N }
129249a5e622SChandu Babu N 
12937c4158a5SRavi Kumar static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr)
12947c4158a5SRavi Kumar {
12957c4158a5SRavi Kumar 	unsigned int mac_addr_hi, mac_addr_lo;
12967c4158a5SRavi Kumar 
12977c4158a5SRavi Kumar 	mac_addr_hi = (addr[5] <<  8) | (addr[4] <<  0);
12987c4158a5SRavi Kumar 	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
12997c4158a5SRavi Kumar 		(addr[1] <<  8) | (addr[0] <<  0);
13007c4158a5SRavi Kumar 
13017c4158a5SRavi Kumar 	AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
13027c4158a5SRavi Kumar 	AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
13037c4158a5SRavi Kumar 
13047c4158a5SRavi Kumar 	return 0;
13057c4158a5SRavi Kumar }
13067c4158a5SRavi Kumar 
1307e01d9b2eSChandu Babu N static void axgbe_config_mac_hash_table(struct axgbe_port *pdata)
1308e01d9b2eSChandu Babu N {
1309e01d9b2eSChandu Babu N 	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
1310e01d9b2eSChandu Babu N 
1311e01d9b2eSChandu Babu N 	pdata->hash_table_shift = 0;
1312e01d9b2eSChandu Babu N 	pdata->hash_table_count = 0;
1313e01d9b2eSChandu Babu N 	pdata->uc_hash_mac_addr = 0;
1314e01d9b2eSChandu Babu N 	memset(pdata->uc_hash_table, 0, sizeof(pdata->uc_hash_table));
1315e01d9b2eSChandu Babu N 
1316e01d9b2eSChandu Babu N 	if (hw_feat->hash_table_size) {
1317e01d9b2eSChandu Babu N 		pdata->hash_table_shift = 26 - (hw_feat->hash_table_size >> 7);
1318e01d9b2eSChandu Babu N 		pdata->hash_table_count = hw_feat->hash_table_size / 32;
1319e01d9b2eSChandu Babu N 	}
1320e01d9b2eSChandu Babu N }
1321e01d9b2eSChandu Babu N 
/* Write the port's configured MAC address into primary filter slot 0. */
static void axgbe_config_mac_address(struct axgbe_port *pdata)
{
	axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes);
}
13267c4158a5SRavi Kumar 
13277c4158a5SRavi Kumar static void axgbe_config_jumbo_enable(struct axgbe_port *pdata)
13287c4158a5SRavi Kumar {
13297c4158a5SRavi Kumar 	unsigned int val;
13307c4158a5SRavi Kumar 
13317c4158a5SRavi Kumar 	val = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ? 1 : 0;
13327c4158a5SRavi Kumar 
13337c4158a5SRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
13347c4158a5SRavi Kumar }
13357c4158a5SRavi Kumar 
/* Apply the currently negotiated PHY speed to the MAC. */
static void axgbe_config_mac_speed(struct axgbe_port *pdata)
{
	axgbe_set_speed(pdata, pdata->phy_speed);
}
13407c4158a5SRavi Kumar 
13417c4158a5SRavi Kumar static void axgbe_config_checksum_offload(struct axgbe_port *pdata)
13427c4158a5SRavi Kumar {
13437c4158a5SRavi Kumar 	if (pdata->rx_csum_enable)
13447c4158a5SRavi Kumar 		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
13457c4158a5SRavi Kumar 	else
13467c4158a5SRavi Kumar 		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
13477c4158a5SRavi Kumar }
13487c4158a5SRavi Kumar 
13499d1ef6b2SChandu Babu N static void axgbe_config_mmc(struct axgbe_port *pdata)
13509d1ef6b2SChandu Babu N {
13519d1ef6b2SChandu Babu N 	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;
13529d1ef6b2SChandu Babu N 
13539d1ef6b2SChandu Babu N 	/* Reset stats */
13549d1ef6b2SChandu Babu N 	memset(stats, 0, sizeof(*stats));
13559d1ef6b2SChandu Babu N 
13569d1ef6b2SChandu Babu N 	/* Set counters to reset on read */
13579d1ef6b2SChandu Babu N 	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
13589d1ef6b2SChandu Babu N 
13599d1ef6b2SChandu Babu N 	/* Reset the counters */
13609d1ef6b2SChandu Babu N 	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
13619d1ef6b2SChandu Babu N }
13629d1ef6b2SChandu Babu N 
/* Full hardware bring-up sequence for the port: DMA, MTL, then MAC.
 * The ordering is significant (Tx queues are flushed before any DMA
 * configuration; descriptor rings exist before DMA interrupts are
 * enabled), so callers must not assume the steps are independent.
 * Returns 0 on success or a negative errno from the failing step.
 */
static int axgbe_init(struct axgbe_port *pdata)
{
	int ret;

	/* Flush Tx queues */
	ret = axgbe_flush_tx_queues(pdata);
	if (ret)
		return ret;
	/* Initialize DMA related features */
	axgbe_config_dma_bus(pdata);
	axgbe_config_dma_cache(pdata);
	axgbe_config_edma_control(pdata);
	axgbe_config_osp_mode(pdata);
	axgbe_config_pblx8(pdata);
	axgbe_config_tx_pbl_val(pdata);
	axgbe_config_rx_pbl_val(pdata);
	axgbe_config_rx_buffer_size(pdata);
	axgbe_config_rss(pdata);
	/* Descriptor rings must be set up before DMA interrupts */
	wrapper_tx_desc_init(pdata);
	ret = wrapper_rx_desc_init(pdata);
	if (ret)
		return ret;
	axgbe_enable_dma_interrupts(pdata);

	/* Initialize MTL related features */
	axgbe_config_mtl_mode(pdata);
	axgbe_config_queue_mapping(pdata);
	axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	axgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	axgbe_config_rx_threshold(pdata, pdata->rx_threshold);
	axgbe_config_tx_fifo_size(pdata);
	axgbe_config_rx_fifo_size(pdata);

	axgbe_enable_mtl_interrupts(pdata);

	/* Initialize MAC related features */
	axgbe_config_mac_hash_table(pdata);
	axgbe_config_mac_address(pdata);
	axgbe_config_jumbo_enable(pdata);
	axgbe_config_flow_control(pdata);
	axgbe_config_mac_speed(pdata);
	axgbe_config_checksum_offload(pdata);
	axgbe_config_mmc(pdata);

	return 0;
}
14107c4158a5SRavi Kumar 
1411572890efSRavi Kumar void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
1412572890efSRavi Kumar {
1413572890efSRavi Kumar 	hw_if->exit = axgbe_exit;
14147c4158a5SRavi Kumar 	hw_if->config_flow_control = axgbe_config_flow_control;
14154ac7516bSRavi Kumar 
14167c4158a5SRavi Kumar 	hw_if->init = axgbe_init;
1417a5c72737SRavi Kumar 
14184ac7516bSRavi Kumar 	hw_if->read_mmd_regs = axgbe_read_mmd_regs;
14194ac7516bSRavi Kumar 	hw_if->write_mmd_regs = axgbe_write_mmd_regs;
14204ac7516bSRavi Kumar 
1421a5c72737SRavi Kumar 	hw_if->set_speed = axgbe_set_speed;
1422a5c72737SRavi Kumar 
14234ac7516bSRavi Kumar 	hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode;
1424627ab524SVenkat Kumar Ande 	hw_if->read_ext_mii_regs_c22 = axgbe_read_ext_mii_regs_c22;
1425627ab524SVenkat Kumar Ande 	hw_if->write_ext_mii_regs_c22 = axgbe_write_ext_mii_regs_c22;
1426627ab524SVenkat Kumar Ande 	hw_if->read_ext_mii_regs_c45 = axgbe_read_ext_mii_regs_c45;
1427627ab524SVenkat Kumar Ande 	hw_if->write_ext_mii_regs_c45 = axgbe_write_ext_mii_regs_c45;
1428627ab524SVenkat Kumar Ande 
14297c4158a5SRavi Kumar 	/* For FLOW ctrl */
14307c4158a5SRavi Kumar 	hw_if->config_tx_flow_control = axgbe_config_tx_flow_control;
14317c4158a5SRavi Kumar 	hw_if->config_rx_flow_control = axgbe_config_rx_flow_control;
143286578516SGirish Nandibasappa 
143386578516SGirish Nandibasappa 	/*vlan*/
143486578516SGirish Nandibasappa 	hw_if->enable_rx_vlan_stripping = axgbe_enable_rx_vlan_stripping;
143586578516SGirish Nandibasappa 	hw_if->disable_rx_vlan_stripping = axgbe_disable_rx_vlan_stripping;
143686578516SGirish Nandibasappa 	hw_if->enable_rx_vlan_filtering = axgbe_enable_rx_vlan_filtering;
143786578516SGirish Nandibasappa 	hw_if->disable_rx_vlan_filtering = axgbe_disable_rx_vlan_filtering;
143886578516SGirish Nandibasappa 	hw_if->update_vlan_hash_table = axgbe_update_vlan_hash_table;
1439572890efSRavi Kumar }
1440